-rw-r--r--  Makefile  2
-rw-r--r--  arch/arm/mach-s5p6442/clock.c  2
-rw-r--r--  arch/arm/mach-s5pv210/clock.c  115
-rw-r--r--  arch/arm/plat-s5p/irq-eint.c  2
-rw-r--r--  arch/arm/plat-samsung/include/plat/sdhci.h  4
-rw-r--r--  arch/mips/alchemy/common/power.c  12
-rw-r--r--  arch/mips/alchemy/common/sleeper.S  81
-rw-r--r--  arch/mips/ar7/platform.c  2
-rw-r--r--  arch/mips/bcm47xx/Makefile  2
-rw-r--r--  arch/mips/bcm47xx/nvram.c  94
-rw-r--r--  arch/mips/bcm47xx/setup.c  39
-rw-r--r--  arch/mips/include/asm/mach-ar7/ar7.h  6
-rw-r--r--  arch/mips/include/asm/mach-ar7/gpio.h  2
-rw-r--r--  arch/mips/include/asm/mach-au1x00/au1000.h  3
-rw-r--r--  arch/mips/include/asm/mach-bcm47xx/nvram.h  36
-rw-r--r--  arch/mips/include/asm/mach-bcm63xx/gpio.h  2
-rw-r--r--  arch/mips/kernel/ftrace.c  184
-rw-r--r--  arch/mips/kernel/mcount.S  55
-rw-r--r--  arch/mips/kernel/mips-mt-fpaff.c  87
-rw-r--r--  arch/mips/kernel/traps.c  2
-rw-r--r--  arch/mips/loongson/Kconfig  6
-rw-r--r--  arch/mips/loongson/common/Makefile  1
-rw-r--r--  arch/mips/loongson/common/cs5536/cs5536_ehci.c  2
-rw-r--r--  arch/mips/loongson/common/cs5536/cs5536_ide.c  15
-rw-r--r--  arch/mips/loongson/common/cs5536/cs5536_isa.c  4
-rw-r--r--  arch/mips/loongson/common/cs5536/cs5536_ohci.c  2
-rw-r--r--  arch/mips/loongson/common/rtc.c  43
-rw-r--r--  arch/mips/math-emu/dp_simple.c  1
-rw-r--r--  arch/mips/math-emu/sp_simple.c  1
-rw-r--r--  arch/mips/oprofile/op_model_loongson2.c  8
-rw-r--r--  arch/powerpc/Kconfig  4
-rw-r--r--  arch/powerpc/Makefile  4
-rw-r--r--  arch/powerpc/include/asm/ptrace.h  32
-rw-r--r--  arch/powerpc/kernel/crash.c  2
-rw-r--r--  arch/powerpc/kernel/irq.c  5
-rw-r--r--  arch/powerpc/kernel/perf_event.c  5
-rw-r--r--  arch/powerpc/kernel/prom_init.c  2
-rw-r--r--  arch/powerpc/kernel/prom_init_check.sh  6
-rw-r--r--  arch/powerpc/lib/Makefile  4
-rw-r--r--  arch/powerpc/lib/crtsavres.S  129
-rw-r--r--  arch/powerpc/lib/feature-fixups.c  17
-rw-r--r--  arch/powerpc/platforms/iseries/pci.c  6
-rw-r--r--  arch/x86/kernel/pci-calgary_64.c  4
-rw-r--r--  arch/x86/mm/pat_rbtree.c  34
-rw-r--r--  drivers/bluetooth/bluecard_cs.c  2
-rw-r--r--  drivers/bluetooth/hci_bcsp.c  2
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c  23
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c  4
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc.c  68
-rw-r--r--  drivers/net/bonding/bond_alb.c  3
-rw-r--r--  drivers/net/bonding/bond_main.c  33
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c  17
-rw-r--r--  drivers/net/ll_temac_main.c  18
-rw-r--r--  drivers/net/mv643xx_eth.c  9
-rw-r--r--  drivers/net/ne.c  4
-rw-r--r--  drivers/net/qlge/qlge_main.c  11
-rw-r--r--  drivers/net/s2io.c  101
-rw-r--r--  drivers/net/s2io.h  4
-rw-r--r--  drivers/net/sb1250-mac.c  1
-rw-r--r--  drivers/net/usb/rndis_host.c  18
-rw-r--r--  drivers/net/usb/usbnet.c  5
-rw-r--r--  drivers/net/virtio_net.c  28
-rw-r--r--  drivers/net/vxge/vxge-main.c  4
-rw-r--r--  drivers/net/wireless/ath/ath9k/ath9k.h  1
-rw-r--r--  drivers/net/wireless/ath/ath9k/main.c  11
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c  6
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-core.c  7
-rw-r--r--  drivers/pcmcia/ds.c  3
-rw-r--r--  drivers/vhost/net.c  12
-rw-r--r--  drivers/vhost/vhost.c  86
-rw-r--r--  drivers/vhost/vhost.h  8
-rw-r--r--  fs/afs/write.c  1
-rw-r--r--  fs/btrfs/extent_io.c  2
-rw-r--r--  fs/ceph/auth_x.c  2
-rw-r--r--  fs/ceph/caps.c  21
-rw-r--r--  fs/ceph/crush/mapper.c  41
-rw-r--r--  fs/ceph/debugfs.c  2
-rw-r--r--  fs/ceph/inode.c  19
-rw-r--r--  fs/ceph/mds_client.c  6
-rw-r--r--  fs/ceph/messenger.c  4
-rw-r--r--  fs/ceph/mon_client.c  3
-rw-r--r--  fs/ceph/osd_client.c  3
-rw-r--r--  fs/ceph/osdmap.c  1
-rw-r--r--  fs/fs-writeback.c  335
-rw-r--r--  fs/splice.c  9
-rw-r--r--  include/drm/ttm/ttm_page_alloc.h  4
-rw-r--r--  include/linux/backing-dev.h  2
-rw-r--r--  include/linux/ethtool.h  2
-rw-r--r--  include/linux/fs.h  13
-rw-r--r--  include/linux/mv643xx_eth.h  5
-rw-r--r--  include/linux/net.h  3
-rw-r--r--  include/linux/netdevice.h  5
-rw-r--r--  include/linux/rbtree.h  13
-rw-r--r--  include/linux/writeback.h  7
-rw-r--r--  include/net/sch_generic.h  20
-rw-r--r--  include/net/xfrm.h  2
-rw-r--r--  lib/rbtree.c  116
-rw-r--r--  mm/backing-dev.c  17
-rw-r--r--  mm/page-writeback.c  3
-rw-r--r--  net/bridge/br_multicast.c  21
-rw-r--r--  net/bridge/br_netfilter.c  3
-rw-r--r--  net/core/dev.c  18
-rw-r--r--  net/core/ethtool.c  41
-rw-r--r--  net/ipv4/xfrm4_policy.c  2
-rw-r--r--  net/ipv6/netfilter/ip6t_REJECT.c  6
-rw-r--r--  net/ipv6/xfrm6_policy.c  2
-rw-r--r--  scripts/mod/modpost.c  5
107 files changed, 1448 insertions, 869 deletions
diff --git a/Makefile b/Makefile
index 914e5f514888..f9835c80a274 100644
--- a/Makefile
+++ b/Makefile
@@ -886,7 +886,7 @@ $(vmlinux-dirs): prepare scripts
# Store (new) KERNELRELASE string in include/config/kernel.release
include/config/kernel.release: include/config/auto.conf FORCE
$(Q)rm -f $@
- $(Q)echo "$(KERNELVERSION)$$($(CONFIG_SHELL) scripts/setlocalversion $(srctree))" > $@
+ $(Q)echo "$(KERNELVERSION)$$($(CONFIG_SHELL) $(srctree)/scripts/setlocalversion $(srctree))" > $@
# Things we need to do before we recursively start building the kernel
diff --git a/arch/arm/mach-s5p6442/clock.c b/arch/arm/mach-s5p6442/clock.c
index 3aadbf42c112..087e57f20ad5 100644
--- a/arch/arm/mach-s5p6442/clock.c
+++ b/arch/arm/mach-s5p6442/clock.c
@@ -294,7 +294,7 @@ void __init_or_cpufreq s5p6442_setup_clocks(void)
mpll = s5p_get_pll45xx(xtal, __raw_readl(S5P_MPLL_CON), pll_4502);
epll = s5p_get_pll45xx(xtal, __raw_readl(S5P_EPLL_CON), pll_4500);
- printk(KERN_INFO "S5P6440: PLL settings, A=%ld, M=%ld, E=%ld",
+ printk(KERN_INFO "S5P6442: PLL settings, A=%ld, M=%ld, E=%ld",
apll, mpll, epll);
clk_fout_apll.rate = apll;
diff --git a/arch/arm/mach-s5pv210/clock.c b/arch/arm/mach-s5pv210/clock.c
index 154bca4abc09..af91fefef2c6 100644
--- a/arch/arm/mach-s5pv210/clock.c
+++ b/arch/arm/mach-s5pv210/clock.c
@@ -183,6 +183,11 @@ static int s5pv210_clk_mask0_ctrl(struct clk *clk, int enable)
return s5p_gatectrl(S5P_CLK_SRC_MASK0, clk, enable);
}
+static int s5pv210_clk_mask1_ctrl(struct clk *clk, int enable)
+{
+ return s5p_gatectrl(S5P_CLK_SRC_MASK1, clk, enable);
+}
+
static struct clk clk_sclk_hdmi27m = {
.name = "sclk_hdmi27m",
.id = -1,
@@ -406,14 +411,14 @@ static struct clk init_clocks_disable[] = {
.id = 0,
.parent = &clk_p,
.enable = s5pv210_clk_ip3_ctrl,
- .ctrlbit = (1<<4),
+ .ctrlbit = (1 << 5),
}, {
.name = "i2s_v32",
.id = 1,
.parent = &clk_p,
.enable = s5pv210_clk_ip3_ctrl,
- .ctrlbit = (1<<4),
- }
+ .ctrlbit = (1 << 6),
+ },
};
static struct clk init_clocks[] = {
@@ -429,25 +434,25 @@ static struct clk init_clocks[] = {
.id = 0,
.parent = &clk_pclk_psys.clk,
.enable = s5pv210_clk_ip3_ctrl,
- .ctrlbit = (1<<7),
+ .ctrlbit = (1 << 17),
}, {
.name = "uart",
.id = 1,
.parent = &clk_pclk_psys.clk,
.enable = s5pv210_clk_ip3_ctrl,
- .ctrlbit = (1<<8),
+ .ctrlbit = (1 << 18),
}, {
.name = "uart",
.id = 2,
.parent = &clk_pclk_psys.clk,
.enable = s5pv210_clk_ip3_ctrl,
- .ctrlbit = (1<<9),
+ .ctrlbit = (1 << 19),
}, {
.name = "uart",
.id = 3,
.parent = &clk_pclk_psys.clk,
.enable = s5pv210_clk_ip3_ctrl,
- .ctrlbit = (1<<10),
+ .ctrlbit = (1 << 20),
},
};
@@ -497,8 +502,8 @@ static struct clksrc_clk clk_sclk_dac = {
.clk = {
.name = "sclk_dac",
.id = -1,
- .ctrlbit = (1 << 10),
- .enable = s5pv210_clk_ip1_ctrl,
+ .enable = s5pv210_clk_mask0_ctrl,
+ .ctrlbit = (1 << 2),
},
.sources = &clkset_sclk_dac,
.reg_src = { .reg = S5P_CLK_SRC1, .shift = 8, .size = 1 },
@@ -527,8 +532,8 @@ static struct clksrc_clk clk_sclk_hdmi = {
.clk = {
.name = "sclk_hdmi",
.id = -1,
- .enable = s5pv210_clk_ip1_ctrl,
- .ctrlbit = (1 << 11),
+ .enable = s5pv210_clk_mask0_ctrl,
+ .ctrlbit = (1 << 0),
},
.sources = &clkset_sclk_hdmi,
.reg_src = { .reg = S5P_CLK_SRC1, .shift = 0, .size = 1 },
@@ -565,8 +570,8 @@ static struct clksrc_clk clk_sclk_audio0 = {
.clk = {
.name = "sclk_audio",
.id = 0,
- .enable = s5pv210_clk_ip3_ctrl,
- .ctrlbit = (1 << 4),
+ .enable = s5pv210_clk_mask0_ctrl,
+ .ctrlbit = (1 << 24),
},
.sources = &clkset_sclk_audio0,
.reg_src = { .reg = S5P_CLK_SRC6, .shift = 0, .size = 4 },
@@ -594,8 +599,8 @@ static struct clksrc_clk clk_sclk_audio1 = {
.clk = {
.name = "sclk_audio",
.id = 1,
- .enable = s5pv210_clk_ip3_ctrl,
- .ctrlbit = (1 << 5),
+ .enable = s5pv210_clk_mask0_ctrl,
+ .ctrlbit = (1 << 25),
},
.sources = &clkset_sclk_audio1,
.reg_src = { .reg = S5P_CLK_SRC6, .shift = 4, .size = 4 },
@@ -623,8 +628,8 @@ static struct clksrc_clk clk_sclk_audio2 = {
.clk = {
.name = "sclk_audio",
.id = 2,
- .enable = s5pv210_clk_ip3_ctrl,
- .ctrlbit = (1 << 6),
+ .enable = s5pv210_clk_mask0_ctrl,
+ .ctrlbit = (1 << 26),
},
.sources = &clkset_sclk_audio2,
.reg_src = { .reg = S5P_CLK_SRC6, .shift = 8, .size = 4 },
@@ -680,8 +685,8 @@ static struct clksrc_clk clksrcs[] = {
.clk = {
.name = "uclk1",
.id = 0,
- .ctrlbit = (1<<17),
- .enable = s5pv210_clk_ip3_ctrl,
+ .enable = s5pv210_clk_mask0_ctrl,
+ .ctrlbit = (1 << 12),
},
.sources = &clkset_uart,
.reg_src = { .reg = S5P_CLK_SRC4, .shift = 16, .size = 4 },
@@ -690,8 +695,8 @@ static struct clksrc_clk clksrcs[] = {
.clk = {
.name = "uclk1",
.id = 1,
- .enable = s5pv210_clk_ip3_ctrl,
- .ctrlbit = (1 << 18),
+ .enable = s5pv210_clk_mask0_ctrl,
+ .ctrlbit = (1 << 13),
},
.sources = &clkset_uart,
.reg_src = { .reg = S5P_CLK_SRC4, .shift = 20, .size = 4 },
@@ -700,8 +705,8 @@ static struct clksrc_clk clksrcs[] = {
.clk = {
.name = "uclk1",
.id = 2,
- .enable = s5pv210_clk_ip3_ctrl,
- .ctrlbit = (1 << 19),
+ .enable = s5pv210_clk_mask0_ctrl,
+ .ctrlbit = (1 << 14),
},
.sources = &clkset_uart,
.reg_src = { .reg = S5P_CLK_SRC4, .shift = 24, .size = 4 },
@@ -710,8 +715,8 @@ static struct clksrc_clk clksrcs[] = {
.clk = {
.name = "uclk1",
.id = 3,
- .enable = s5pv210_clk_ip3_ctrl,
- .ctrlbit = (1 << 20),
+ .enable = s5pv210_clk_mask0_ctrl,
+ .ctrlbit = (1 << 15),
},
.sources = &clkset_uart,
.reg_src = { .reg = S5P_CLK_SRC4, .shift = 28, .size = 4 },
@@ -720,8 +725,8 @@ static struct clksrc_clk clksrcs[] = {
.clk = {
.name = "sclk_mixer",
.id = -1,
- .enable = s5pv210_clk_ip1_ctrl,
- .ctrlbit = (1 << 9),
+ .enable = s5pv210_clk_mask0_ctrl,
+ .ctrlbit = (1 << 1),
},
.sources = &clkset_sclk_mixer,
.reg_src = { .reg = S5P_CLK_SRC1, .shift = 4, .size = 1 },
@@ -738,8 +743,8 @@ static struct clksrc_clk clksrcs[] = {
.clk = {
.name = "sclk_fimc",
.id = 0,
- .enable = s5pv210_clk_ip0_ctrl,
- .ctrlbit = (1 << 24),
+ .enable = s5pv210_clk_mask1_ctrl,
+ .ctrlbit = (1 << 2),
},
.sources = &clkset_group2,
.reg_src = { .reg = S5P_CLK_SRC3, .shift = 12, .size = 4 },
@@ -748,8 +753,8 @@ static struct clksrc_clk clksrcs[] = {
.clk = {
.name = "sclk_fimc",
.id = 1,
- .enable = s5pv210_clk_ip0_ctrl,
- .ctrlbit = (1 << 25),
+ .enable = s5pv210_clk_mask1_ctrl,
+ .ctrlbit = (1 << 3),
},
.sources = &clkset_group2,
.reg_src = { .reg = S5P_CLK_SRC3, .shift = 16, .size = 4 },
@@ -758,8 +763,8 @@ static struct clksrc_clk clksrcs[] = {
.clk = {
.name = "sclk_fimc",
.id = 2,
- .enable = s5pv210_clk_ip0_ctrl,
- .ctrlbit = (1 << 26),
+ .enable = s5pv210_clk_mask1_ctrl,
+ .ctrlbit = (1 << 4),
},
.sources = &clkset_group2,
.reg_src = { .reg = S5P_CLK_SRC3, .shift = 20, .size = 4 },
@@ -768,6 +773,8 @@ static struct clksrc_clk clksrcs[] = {
.clk = {
.name = "sclk_cam",
.id = 0,
+ .enable = s5pv210_clk_mask0_ctrl,
+ .ctrlbit = (1 << 3),
},
.sources = &clkset_group2,
.reg_src = { .reg = S5P_CLK_SRC1, .shift = 12, .size = 4 },
@@ -776,6 +783,8 @@ static struct clksrc_clk clksrcs[] = {
.clk = {
.name = "sclk_cam",
.id = 1,
+ .enable = s5pv210_clk_mask0_ctrl,
+ .ctrlbit = (1 << 4),
},
.sources = &clkset_group2,
.reg_src = { .reg = S5P_CLK_SRC1, .shift = 16, .size = 4 },
@@ -784,8 +793,8 @@ static struct clksrc_clk clksrcs[] = {
.clk = {
.name = "sclk_fimd",
.id = -1,
- .enable = s5pv210_clk_ip1_ctrl,
- .ctrlbit = (1 << 0),
+ .enable = s5pv210_clk_mask0_ctrl,
+ .ctrlbit = (1 << 5),
},
.sources = &clkset_group2,
.reg_src = { .reg = S5P_CLK_SRC1, .shift = 20, .size = 4 },
@@ -794,8 +803,8 @@ static struct clksrc_clk clksrcs[] = {
.clk = {
.name = "sclk_mmc",
.id = 0,
- .enable = s5pv210_clk_ip2_ctrl,
- .ctrlbit = (1 << 16),
+ .enable = s5pv210_clk_mask0_ctrl,
+ .ctrlbit = (1 << 8),
},
.sources = &clkset_group2,
.reg_src = { .reg = S5P_CLK_SRC4, .shift = 0, .size = 4 },
@@ -804,8 +813,8 @@ static struct clksrc_clk clksrcs[] = {
.clk = {
.name = "sclk_mmc",
.id = 1,
- .enable = s5pv210_clk_ip2_ctrl,
- .ctrlbit = (1 << 17),
+ .enable = s5pv210_clk_mask0_ctrl,
+ .ctrlbit = (1 << 9),
},
.sources = &clkset_group2,
.reg_src = { .reg = S5P_CLK_SRC4, .shift = 4, .size = 4 },
@@ -814,8 +823,8 @@ static struct clksrc_clk clksrcs[] = {
.clk = {
.name = "sclk_mmc",
.id = 2,
- .enable = s5pv210_clk_ip2_ctrl,
- .ctrlbit = (1 << 18),
+ .enable = s5pv210_clk_mask0_ctrl,
+ .ctrlbit = (1 << 10),
},
.sources = &clkset_group2,
.reg_src = { .reg = S5P_CLK_SRC4, .shift = 8, .size = 4 },
@@ -824,8 +833,8 @@ static struct clksrc_clk clksrcs[] = {
.clk = {
.name = "sclk_mmc",
.id = 3,
- .enable = s5pv210_clk_ip2_ctrl,
- .ctrlbit = (1 << 19),
+ .enable = s5pv210_clk_mask0_ctrl,
+ .ctrlbit = (1 << 11),
},
.sources = &clkset_group2,
.reg_src = { .reg = S5P_CLK_SRC4, .shift = 12, .size = 4 },
@@ -864,8 +873,8 @@ static struct clksrc_clk clksrcs[] = {
.clk = {
.name = "sclk_csis",
.id = -1,
- .enable = s5pv210_clk_ip0_ctrl,
- .ctrlbit = (1 << 31),
+ .enable = s5pv210_clk_mask0_ctrl,
+ .ctrlbit = (1 << 6),
},
.sources = &clkset_group2,
.reg_src = { .reg = S5P_CLK_SRC1, .shift = 24, .size = 4 },
@@ -874,8 +883,8 @@ static struct clksrc_clk clksrcs[] = {
.clk = {
.name = "sclk_spi",
.id = 0,
- .enable = s5pv210_clk_ip3_ctrl,
- .ctrlbit = (1 << 12),
+ .enable = s5pv210_clk_mask0_ctrl,
+ .ctrlbit = (1 << 16),
},
.sources = &clkset_group2,
.reg_src = { .reg = S5P_CLK_SRC5, .shift = 0, .size = 4 },
@@ -884,8 +893,8 @@ static struct clksrc_clk clksrcs[] = {
.clk = {
.name = "sclk_spi",
.id = 1,
- .enable = s5pv210_clk_ip3_ctrl,
- .ctrlbit = (1 << 13),
+ .enable = s5pv210_clk_mask0_ctrl,
+ .ctrlbit = (1 << 17),
},
.sources = &clkset_group2,
.reg_src = { .reg = S5P_CLK_SRC5, .shift = 4, .size = 4 },
@@ -894,8 +903,8 @@ static struct clksrc_clk clksrcs[] = {
.clk = {
.name = "sclk_pwi",
.id = -1,
- .enable = &s5pv210_clk_ip4_ctrl,
- .ctrlbit = (1 << 2),
+ .enable = s5pv210_clk_mask0_ctrl,
+ .ctrlbit = (1 << 29),
},
.sources = &clkset_group2,
.reg_src = { .reg = S5P_CLK_SRC6, .shift = 20, .size = 4 },
@@ -904,8 +913,8 @@ static struct clksrc_clk clksrcs[] = {
.clk = {
.name = "sclk_pwm",
.id = -1,
- .enable = s5pv210_clk_ip3_ctrl,
- .ctrlbit = (1 << 23),
+ .enable = s5pv210_clk_mask0_ctrl,
+ .ctrlbit = (1 << 19),
},
.sources = &clkset_group2,
.reg_src = { .reg = S5P_CLK_SRC5, .shift = 12, .size = 4 },
diff --git a/arch/arm/plat-s5p/irq-eint.c b/arch/arm/plat-s5p/irq-eint.c
index e56c8075df97..f36cd3327025 100644
--- a/arch/arm/plat-s5p/irq-eint.c
+++ b/arch/arm/plat-s5p/irq-eint.c
@@ -71,7 +71,7 @@ static int s5p_irq_eint_set_type(unsigned int irq, unsigned int type)
break;
case IRQ_TYPE_EDGE_FALLING:
- newvalue = S5P_EXTINT_RISEEDGE;
+ newvalue = S5P_EXTINT_FALLEDGE;
break;
case IRQ_TYPE_EDGE_BOTH:
diff --git a/arch/arm/plat-samsung/include/plat/sdhci.h b/arch/arm/plat-samsung/include/plat/sdhci.h
index 13f9fb20900a..016674fa20dd 100644
--- a/arch/arm/plat-samsung/include/plat/sdhci.h
+++ b/arch/arm/plat-samsung/include/plat/sdhci.h
@@ -166,8 +166,10 @@ static inline void s3c6410_default_sdhci2(void) { }
#else
static inline void s3c6410_default_sdhci0(void) { }
static inline void s3c6410_default_sdhci1(void) { }
+static inline void s3c6410_default_sdhci2(void) { }
static inline void s3c6400_default_sdhci0(void) { }
static inline void s3c6400_default_sdhci1(void) { }
+static inline void s3c6400_default_sdhci2(void) { }
#endif /* CONFIG_S3C64XX_SETUP_SDHCI */
@@ -239,7 +241,7 @@ static inline void s5pv210_default_sdhci0(void)
s3c_hsmmc0_def_platdata.cfg_card = s5pv210_setup_sdhci_cfg_card;
}
#else
-static inline void s5pc100_default_sdhci0(void) { }
+static inline void s5pv210_default_sdhci0(void) { }
#endif /* CONFIG_S3C_DEV_HSMMC */
#ifdef CONFIG_S3C_DEV_HSMMC1
diff --git a/arch/mips/alchemy/common/power.c b/arch/mips/alchemy/common/power.c
index 14eb8c492da2..5ef06a164a82 100644
--- a/arch/mips/alchemy/common/power.c
+++ b/arch/mips/alchemy/common/power.c
@@ -193,9 +193,15 @@ static void restore_core_regs(void)
void au_sleep(void)
{
- save_core_regs();
- au1xxx_save_and_sleep();
- restore_core_regs();
+ int cpuid = alchemy_get_cputype();
+ if (cpuid != ALCHEMY_CPU_UNKNOWN) {
+ save_core_regs();
+ if (cpuid <= ALCHEMY_CPU_AU1500)
+ alchemy_sleep_au1000();
+ else if (cpuid <= ALCHEMY_CPU_AU1200)
+ alchemy_sleep_au1550();
+ restore_core_regs();
+ }
}
#endif /* CONFIG_PM */
diff --git a/arch/mips/alchemy/common/sleeper.S b/arch/mips/alchemy/common/sleeper.S
index 4f4b16741d12..77f3c743b716 100644
--- a/arch/mips/alchemy/common/sleeper.S
+++ b/arch/mips/alchemy/common/sleeper.S
@@ -22,10 +22,9 @@
.set noat
.align 5
-/* Save all of the processor general registers and go to sleep.
- * A wakeup condition will get us back here to restore the registers.
- */
-LEAF(au1xxx_save_and_sleep)
+
+/* preparatory stuff */
+.macro SETUP_SLEEP
subu sp, PT_SIZE
sw $1, PT_R1(sp)
sw $2, PT_R2(sp)
@@ -69,12 +68,32 @@ LEAF(au1xxx_save_and_sleep)
*/
lui t3, 0xb190 /* sys_xxx */
sw sp, 0x0018(t3)
- la k0, 3f /* resume path */
+ la k0, alchemy_sleep_wakeup /* resume path */
sw k0, 0x001c(t3)
+.endm
- /* Put SDRAM into self refresh: Preload instructions into cache,
- * issue a precharge, auto/self refresh, then sleep commands to it.
- */
+.macro DO_SLEEP
+ /* put power supply and processor to sleep */
+ sw zero, 0x0078(t3) /* sys_slppwr */
+ sync
+ sw zero, 0x007c(t3) /* sys_sleep */
+ sync
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+.endm
+
+/* sleep code for Au1000/Au1100/Au1500 memory controller type */
+LEAF(alchemy_sleep_au1000)
+
+ SETUP_SLEEP
+
+ /* cache following instructions, as memory gets put to sleep */
la t0, 1f
.set mips3
cache 0x14, 0(t0)
@@ -84,17 +103,32 @@ LEAF(au1xxx_save_and_sleep)
.set mips0
1: lui a0, 0xb400 /* mem_xxx */
-#if defined(CONFIG_SOC_AU1000) || defined(CONFIG_SOC_AU1100) || \
- defined(CONFIG_SOC_AU1500)
sw zero, 0x001c(a0) /* Precharge */
sync
sw zero, 0x0020(a0) /* Auto Refresh */
sync
sw zero, 0x0030(a0) /* Sleep */
sync
-#endif
-#if defined(CONFIG_SOC_AU1550) || defined(CONFIG_SOC_AU1200)
+ DO_SLEEP
+
+END(alchemy_sleep_au1000)
+
+/* sleep code for Au1550/Au1200 memory controller type */
+LEAF(alchemy_sleep_au1550)
+
+ SETUP_SLEEP
+
+ /* cache following instructions, as memory gets put to sleep */
+ la t0, 1f
+ .set mips3
+ cache 0x14, 0(t0)
+ cache 0x14, 32(t0)
+ cache 0x14, 64(t0)
+ cache 0x14, 96(t0)
+ .set mips0
+
+1: lui a0, 0xb400 /* mem_xxx */
sw zero, 0x08c0(a0) /* Precharge */
sync
sw zero, 0x08d0(a0) /* Self Refresh */
@@ -114,26 +148,17 @@ LEAF(au1xxx_save_and_sleep)
and t1, t0, t1 /* clear CE[1:0] */
sw t1, 0x0840(a0) /* mem_sdconfiga */
sync
-#endif
- /* put power supply and processor to sleep */
- sw zero, 0x0078(t3) /* sys_slppwr */
- sync
- sw zero, 0x007c(t3) /* sys_sleep */
- sync
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
+ DO_SLEEP
+
+END(alchemy_sleep_au1550)
+
/* This is where we return upon wakeup.
* Reload all of the registers and return.
*/
-3: lw k0, 0x20(sp)
+LEAF(alchemy_sleep_wakeup)
+ lw k0, 0x20(sp)
mtc0 k0, CP0_STATUS
lw k0, 0x1c(sp)
mtc0 k0, CP0_CONTEXT
@@ -169,4 +194,4 @@ LEAF(au1xxx_save_and_sleep)
lw $31, PT_R31(sp)
jr ra
addiu sp, PT_SIZE
-END(au1xxx_save_and_sleep)
+END(alchemy_sleep_wakeup)
diff --git a/arch/mips/ar7/platform.c b/arch/mips/ar7/platform.c
index 566f2d7f2ea3..8f31d1d59683 100644
--- a/arch/mips/ar7/platform.c
+++ b/arch/mips/ar7/platform.c
@@ -542,7 +542,7 @@ static int __init ar7_register_uarts(void)
if (IS_ERR(bus_clk))
panic("unable to get bus clk\n");
- uart_port.type = PORT_16550A;
+ uart_port.type = PORT_AR7;
uart_port.uartclk = clk_get_rate(bus_clk) / 2;
uart_port.iotype = UPIO_MEM32;
uart_port.regshift = 2;
diff --git a/arch/mips/bcm47xx/Makefile b/arch/mips/bcm47xx/Makefile
index 35294b12d638..7465e8a72d9a 100644
--- a/arch/mips/bcm47xx/Makefile
+++ b/arch/mips/bcm47xx/Makefile
@@ -3,4 +3,4 @@
# under Linux.
#
-obj-y := gpio.o irq.o prom.o serial.o setup.o time.o wgt634u.o
+obj-y := gpio.o irq.o nvram.o prom.o serial.o setup.o time.o wgt634u.o
diff --git a/arch/mips/bcm47xx/nvram.c b/arch/mips/bcm47xx/nvram.c
new file mode 100644
index 000000000000..06e03b222f6d
--- /dev/null
+++ b/arch/mips/bcm47xx/nvram.c
@@ -0,0 +1,94 @@
+/*
+ * BCM947xx nvram variable access
+ *
+ * Copyright (C) 2005 Broadcom Corporation
+ * Copyright (C) 2006 Felix Fietkau <nbd@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/ssb/ssb.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <asm/addrspace.h>
+#include <asm/mach-bcm47xx/nvram.h>
+#include <asm/mach-bcm47xx/bcm47xx.h>
+
+static char nvram_buf[NVRAM_SPACE];
+
+/* Probe for NVRAM header */
+static void __init early_nvram_init(void)
+{
+ struct ssb_mipscore *mcore = &ssb_bcm47xx.mipscore;
+ struct nvram_header *header;
+ int i;
+ u32 base, lim, off;
+ u32 *src, *dst;
+
+ base = mcore->flash_window;
+ lim = mcore->flash_window_size;
+
+ off = FLASH_MIN;
+ while (off <= lim) {
+ /* Windowed flash access */
+ header = (struct nvram_header *)
+ KSEG1ADDR(base + off - NVRAM_SPACE);
+ if (header->magic == NVRAM_HEADER)
+ goto found;
+ off <<= 1;
+ }
+
+ /* Try embedded NVRAM at 4 KB and 1 KB as last resorts */
+ header = (struct nvram_header *) KSEG1ADDR(base + 4096);
+ if (header->magic == NVRAM_HEADER)
+ goto found;
+
+ header = (struct nvram_header *) KSEG1ADDR(base + 1024);
+ if (header->magic == NVRAM_HEADER)
+ goto found;
+
+ return;
+
+found:
+ src = (u32 *) header;
+ dst = (u32 *) nvram_buf;
+ for (i = 0; i < sizeof(struct nvram_header); i += 4)
+ *dst++ = *src++;
+ for (; i < header->len && i < NVRAM_SPACE; i += 4)
+ *dst++ = le32_to_cpu(*src++);
+}
+
+int nvram_getenv(char *name, char *val, size_t val_len)
+{
+ char *var, *value, *end, *eq;
+
+ if (!name)
+ return 1;
+
+ if (!nvram_buf[0])
+ early_nvram_init();
+
+ /* Look for name=value and return value */
+ var = &nvram_buf[sizeof(struct nvram_header)];
+ end = nvram_buf + sizeof(nvram_buf) - 2;
+ end[0] = end[1] = '\0';
+ for (; *var; var = value + strlen(value) + 1) {
+ eq = strchr(var, '=');
+ if (!eq)
+ break;
+ value = eq + 1;
+ if ((eq - var) == strlen(name) &&
+ strncmp(var, name, (eq - var)) == 0) {
+ snprintf(val, val_len, "%s", value);
+ return 0;
+ }
+ }
+ return 1;
+}
+EXPORT_SYMBOL(nvram_getenv);
diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c
index d442e11625fa..b1aee33efd11 100644
--- a/arch/mips/bcm47xx/setup.c
+++ b/arch/mips/bcm47xx/setup.c
@@ -1,8 +1,8 @@
/*
* Copyright (C) 2004 Florian Schirmer <jolt@tuxbox.org>
- * Copyright (C) 2005 Waldemar Brodkorb <wbx@openwrt.org>
* Copyright (C) 2006 Felix Fietkau <nbd@openwrt.org>
* Copyright (C) 2006 Michael Buesch <mb@bu3sch.de>
+ * Copyright (C) 2010 Waldemar Brodkorb <wbx@openadk.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -33,6 +33,7 @@
#include <asm/time.h>
#include <bcm47xx.h>
#include <asm/fw/cfe/cfe_api.h>
+#include <asm/mach-bcm47xx/nvram.h>
struct ssb_bus ssb_bcm47xx;
EXPORT_SYMBOL(ssb_bcm47xx);
@@ -81,28 +82,42 @@ static int bcm47xx_get_invariants(struct ssb_bus *bus,
/* Fill boardinfo structure */
memset(&(iv->boardinfo), 0 , sizeof(struct ssb_boardinfo));
- if (cfe_getenv("boardvendor", buf, sizeof(buf)) >= 0)
+ if (cfe_getenv("boardvendor", buf, sizeof(buf)) >= 0 ||
+ nvram_getenv("boardvendor", buf, sizeof(buf)) >= 0)
iv->boardinfo.type = (u16)simple_strtoul(buf, NULL, 0);
- if (cfe_getenv("boardtype", buf, sizeof(buf)) >= 0)
+ if (cfe_getenv("boardtype", buf, sizeof(buf)) >= 0 ||
+ nvram_getenv("boardtype", buf, sizeof(buf)) >= 0)
iv->boardinfo.type = (u16)simple_strtoul(buf, NULL, 0);
- if (cfe_getenv("boardrev", buf, sizeof(buf)) >= 0)
+ if (cfe_getenv("boardrev", buf, sizeof(buf)) >= 0 ||
+ nvram_getenv("boardrev", buf, sizeof(buf)) >= 0)
iv->boardinfo.rev = (u16)simple_strtoul(buf, NULL, 0);
/* Fill sprom structure */
memset(&(iv->sprom), 0, sizeof(struct ssb_sprom));
iv->sprom.revision = 3;
- if (cfe_getenv("et0macaddr", buf, sizeof(buf)) >= 0)
+ if (cfe_getenv("et0macaddr", buf, sizeof(buf)) >= 0 ||
+ nvram_getenv("et0macaddr", buf, sizeof(buf)) >= 0)
str2eaddr(buf, iv->sprom.et0mac);
- if (cfe_getenv("et1macaddr", buf, sizeof(buf)) >= 0)
+
+ if (cfe_getenv("et1macaddr", buf, sizeof(buf)) >= 0 ||
+ nvram_getenv("et1macaddr", buf, sizeof(buf)) >= 0)
str2eaddr(buf, iv->sprom.et1mac);
- if (cfe_getenv("et0phyaddr", buf, sizeof(buf)) >= 0)
- iv->sprom.et0phyaddr = simple_strtoul(buf, NULL, 10);
- if (cfe_getenv("et1phyaddr", buf, sizeof(buf)) >= 0)
- iv->sprom.et1phyaddr = simple_strtoul(buf, NULL, 10);
- if (cfe_getenv("et0mdcport", buf, sizeof(buf)) >= 0)
+
+ if (cfe_getenv("et0phyaddr", buf, sizeof(buf)) >= 0 ||
+ nvram_getenv("et0phyaddr", buf, sizeof(buf)) >= 0)
+ iv->sprom.et0phyaddr = simple_strtoul(buf, NULL, 0);
+
+ if (cfe_getenv("et1phyaddr", buf, sizeof(buf)) >= 0 ||
+ nvram_getenv("et1phyaddr", buf, sizeof(buf)) >= 0)
+ iv->sprom.et1phyaddr = simple_strtoul(buf, NULL, 0);
+
+ if (cfe_getenv("et0mdcport", buf, sizeof(buf)) >= 0 ||
+ nvram_getenv("et0mdcport", buf, sizeof(buf)) >= 0)
iv->sprom.et0mdcport = simple_strtoul(buf, NULL, 10);
- if (cfe_getenv("et1mdcport", buf, sizeof(buf)) >= 0)
+
+ if (cfe_getenv("et1mdcport", buf, sizeof(buf)) >= 0 ||
+ nvram_getenv("et1mdcport", buf, sizeof(buf)) >= 0)
iv->sprom.et1mdcport = simple_strtoul(buf, NULL, 10);
return 0;
diff --git a/arch/mips/include/asm/mach-ar7/ar7.h b/arch/mips/include/asm/mach-ar7/ar7.h
index f1cf38943497..483ffea9ecb1 100644
--- a/arch/mips/include/asm/mach-ar7/ar7.h
+++ b/arch/mips/include/asm/mach-ar7/ar7.h
@@ -50,7 +50,7 @@
#define UR8_REGS_WDT (AR7_REGS_BASE + 0x0b00)
#define UR8_REGS_UART1 (AR7_REGS_BASE + 0x0f00)
-#define AR7_RESET_PEREPHERIAL 0x0
+#define AR7_RESET_PERIPHERAL 0x0
#define AR7_RESET_SOFTWARE 0x4
#define AR7_RESET_STATUS 0x8
@@ -128,7 +128,7 @@ static inline int ar7_has_high_cpmac(void)
static inline void ar7_device_enable(u32 bit)
{
void *reset_reg =
- (void *)KSEG1ADDR(AR7_REGS_RESET + AR7_RESET_PEREPHERIAL);
+ (void *)KSEG1ADDR(AR7_REGS_RESET + AR7_RESET_PERIPHERAL);
writel(readl(reset_reg) | (1 << bit), reset_reg);
msleep(20);
}
@@ -136,7 +136,7 @@ static inline void ar7_device_enable(u32 bit)
static inline void ar7_device_disable(u32 bit)
{
void *reset_reg =
- (void *)KSEG1ADDR(AR7_REGS_RESET + AR7_RESET_PEREPHERIAL);
+ (void *)KSEG1ADDR(AR7_REGS_RESET + AR7_RESET_PERIPHERAL);
writel(readl(reset_reg) & ~(1 << bit), reset_reg);
msleep(20);
}
diff --git a/arch/mips/include/asm/mach-ar7/gpio.h b/arch/mips/include/asm/mach-ar7/gpio.h
index 73f9b162c970..abc317c0372e 100644
--- a/arch/mips/include/asm/mach-ar7/gpio.h
+++ b/arch/mips/include/asm/mach-ar7/gpio.h
@@ -24,7 +24,7 @@
#define AR7_GPIO_MAX 32
#define NR_BUILTIN_GPIO AR7_GPIO_MAX
-#define gpio_to_irq(gpio) NULL
+#define gpio_to_irq(gpio) -1
#define gpio_get_value __gpio_get_value
#define gpio_set_value __gpio_set_value
diff --git a/arch/mips/include/asm/mach-au1x00/au1000.h b/arch/mips/include/asm/mach-au1x00/au1000.h
index e76941db2312..a6976619160a 100644
--- a/arch/mips/include/asm/mach-au1x00/au1000.h
+++ b/arch/mips/include/asm/mach-au1x00/au1000.h
@@ -188,7 +188,8 @@ extern unsigned long get_au1x00_uart_baud_base(void);
extern unsigned long au1xxx_calc_clock(void);
/* PM: arch/mips/alchemy/common/sleeper.S, power.c, irq.c */
-void au1xxx_save_and_sleep(void);
+void alchemy_sleep_au1000(void);
+void alchemy_sleep_au1550(void);
void au_sleep(void);
diff --git a/arch/mips/include/asm/mach-bcm47xx/nvram.h b/arch/mips/include/asm/mach-bcm47xx/nvram.h
new file mode 100644
index 000000000000..0d8cc146f7a4
--- /dev/null
+++ b/arch/mips/include/asm/mach-bcm47xx/nvram.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2005, Broadcom Corporation
+ * Copyright (C) 2006, Felix Fietkau <nbd@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __NVRAM_H
+#define __NVRAM_H
+
+#include <linux/types.h>
+
+struct nvram_header {
+ u32 magic;
+ u32 len;
+ u32 crc_ver_init; /* 0:7 crc, 8:15 ver, 16:31 sdram_init */
+ u32 config_refresh; /* 0:15 sdram_config, 16:31 sdram_refresh */
+ u32 config_ncdl; /* ncdl values for memc */
+};
+
+#define NVRAM_HEADER 0x48534C46 /* 'FLSH' */
+#define NVRAM_VERSION 1
+#define NVRAM_HEADER_SIZE 20
+#define NVRAM_SPACE 0x8000
+
+#define FLASH_MIN 0x00020000 /* Minimum flash size */
+
+#define NVRAM_MAX_VALUE_LEN 255
+#define NVRAM_MAX_PARAM_LEN 64
+
+extern int nvram_getenv(char *name, char *val, size_t val_len);
+
+#endif
diff --git a/arch/mips/include/asm/mach-bcm63xx/gpio.h b/arch/mips/include/asm/mach-bcm63xx/gpio.h
index 7cda8c0a3979..1eb534de8e3b 100644
--- a/arch/mips/include/asm/mach-bcm63xx/gpio.h
+++ b/arch/mips/include/asm/mach-bcm63xx/gpio.h
@@ -3,7 +3,7 @@
#include <bcm63xx_gpio.h>
-#define gpio_to_irq(gpio) NULL
+#define gpio_to_irq(gpio) -1
#define gpio_get_value __gpio_get_value
#define gpio_set_value __gpio_set_value
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index e9e64e0ff7aa..5a84a1f11231 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -2,7 +2,7 @@
* Code for replacing ftrace calls with jumps.
*
* Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
- * Copyright (C) 2009 DSLab, Lanzhou University, China
+ * Copyright (C) 2009, 2010 DSLab, Lanzhou University, China
* Author: Wu Zhangjin <wuzhangjin@gmail.com>
*
* Thanks goes to Steven Rostedt for writing the original x86 version.
@@ -12,18 +12,62 @@
#include <linux/init.h>
#include <linux/ftrace.h>
-#include <asm/cacheflush.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
+#include <asm/cacheflush.h>
+#include <asm/uasm.h>
+
+/*
+ * If the Instruction Pointer is in module space (0xc0000000), return true;
+ * otherwise, it is in kernel space (0x80000000), return false.
+ *
+ * FIXME: This will not work when the kernel space and module space are the
+ * same. If they are the same, we need to modify scripts/recordmcount.pl,
+ * ftrace_make_nop/call() and the other related parts to ensure the
+ * enabling/disabling of the calling site to _mcount is right for both kernel
+ * and module.
+ */
+
+static inline int in_module(unsigned long ip)
+{
+ return ip & 0x40000000;
+}
#ifdef CONFIG_DYNAMIC_FTRACE
#define JAL 0x0c000000 /* jump & link: ip --> ra, jump to target */
#define ADDR_MASK 0x03ffffff /* op_code|addr : 31...26|25 ....0 */
-#define jump_insn_encode(op_code, addr) \
- ((unsigned int)((op_code) | (((addr) >> 2) & ADDR_MASK)))
-static unsigned int ftrace_nop = 0x00000000;
+#define INSN_B_1F_4 0x10000004 /* b 1f; offset = 4 */
+#define INSN_B_1F_5 0x10000005 /* b 1f; offset = 5 */
+#define INSN_NOP 0x00000000 /* nop */
+#define INSN_JAL(addr) \
+ ((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))
+
+static unsigned int insn_jal_ftrace_caller __read_mostly;
+static unsigned int insn_lui_v1_hi16_mcount __read_mostly;
+static unsigned int insn_j_ftrace_graph_caller __maybe_unused __read_mostly;
+
+static inline void ftrace_dyn_arch_init_insns(void)
+{
+ u32 *buf;
+ unsigned int v1;
+
+ /* lui v1, hi16_mcount */
+ v1 = 3;
+ buf = (u32 *)&insn_lui_v1_hi16_mcount;
+ UASM_i_LA_mostly(&buf, v1, MCOUNT_ADDR);
+
+ /* jal (ftrace_caller + 8), jump over the first two instruction */
+ buf = (u32 *)&insn_jal_ftrace_caller;
+ uasm_i_jal(&buf, (FTRACE_ADDR + 8));
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ /* j ftrace_graph_caller */
+ buf = (u32 *)&insn_j_ftrace_graph_caller;
+ uasm_i_j(&buf, (unsigned long)ftrace_graph_caller);
+#endif
+}
static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
{
@@ -40,67 +84,56 @@ static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
return 0;
}
-static int lui_v1;
-static int jal_mcount;
-
int ftrace_make_nop(struct module *mod,
struct dyn_ftrace *rec, unsigned long addr)
{
unsigned int new;
- int faulted;
unsigned long ip = rec->ip;
- /* We have compiled module with -mlong-calls, but compiled the kernel
- * without it, we need to cope with them respectively. */
- if (ip & 0x40000000) {
- /* record it for ftrace_make_call */
- if (lui_v1 == 0) {
- /* lui_v1 = *(unsigned int *)ip; */
- safe_load_code(lui_v1, ip, faulted);
-
- if (unlikely(faulted))
- return -EFAULT;
- }
-
- /* lui v1, hi_16bit_of_mcount --> b 1f (0x10000004)
+ /*
+ * We have compiled module with -mlong-calls, but compiled the kernel
+ * without it, we need to cope with them respectively.
+ */
+ if (in_module(ip)) {
+#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
+ /*
+ * lui v1, hi_16bit_of_mcount --> b 1f (0x10000005)
+ * addiu v1, v1, low_16bit_of_mcount
+ * move at, ra
+ * move $12, ra_address
+ * jalr v1
+ * sub sp, sp, 8
+ * 1: offset = 5 instructions
+ */
+ new = INSN_B_1F_5;
+#else
+ /*
+ * lui v1, hi_16bit_of_mcount --> b 1f (0x10000004)
* addiu v1, v1, low_16bit_of_mcount
* move at, ra
* jalr v1
- * nop
- * 1f: (ip + 12)
+ * nop | move $12, ra_address | sub sp, sp, 8
+ * 1: offset = 4 instructions
*/
- new = 0x10000004;
+ new = INSN_B_1F_4;
+#endif
} else {
- /* record/calculate it for ftrace_make_call */
- if (jal_mcount == 0) {
- /* We can record it directly like this:
- * jal_mcount = *(unsigned int *)ip;
- * Herein, jump over the first two nop instructions */
- jal_mcount = jump_insn_encode(JAL, (MCOUNT_ADDR + 8));
- }
-
- /* move at, ra
- * jalr v1 --> nop
+ /*
+ * move at, ra
+ * jal _mcount --> nop
*/
- new = ftrace_nop;
+ new = INSN_NOP;
}
return ftrace_modify_code(ip, new);
}
-static int modified; /* initialized as 0 by default */
-
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
unsigned int new;
unsigned long ip = rec->ip;
- /* We just need to remove the "b ftrace_stub" at the fist time! */
- if (modified == 0) {
- modified = 1;
- ftrace_modify_code(addr, ftrace_nop);
- }
/* ip, module: 0xc0000000, kernel: 0x80000000 */
- new = (ip & 0x40000000) ? lui_v1 : jal_mcount;
+ new = in_module(ip) ? insn_lui_v1_hi16_mcount : insn_jal_ftrace_caller;
return ftrace_modify_code(ip, new);
}
@@ -111,44 +144,48 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
{
unsigned int new;
- new = jump_insn_encode(JAL, (unsigned long)func);
+ new = INSN_JAL((unsigned long)func);
return ftrace_modify_code(FTRACE_CALL_IP, new);
}
int __init ftrace_dyn_arch_init(void *data)
{
+ /* Encode the instructions when booting */
+ ftrace_dyn_arch_init_insns();
+
+ /* Remove "b ftrace_stub" to ensure ftrace_caller() is executed */
+ ftrace_modify_code(MCOUNT_ADDR, INSN_NOP);
+
/* The return code is retured via data */
*(unsigned long *)data = 0;
return 0;
}
-#endif /* CONFIG_DYNAMIC_FTRACE */
+#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);
-#define JMP 0x08000000 /* jump to target directly */
-#define CALL_FTRACE_GRAPH_CALLER \
- jump_insn_encode(JMP, (unsigned long)(&ftrace_graph_caller))
#define FTRACE_GRAPH_CALL_IP ((unsigned long)(&ftrace_graph_call))
int ftrace_enable_ftrace_graph_caller(void)
{
return ftrace_modify_code(FTRACE_GRAPH_CALL_IP,
- CALL_FTRACE_GRAPH_CALLER);
+ insn_j_ftrace_graph_caller);
}
int ftrace_disable_ftrace_graph_caller(void)
{
- return ftrace_modify_code(FTRACE_GRAPH_CALL_IP, ftrace_nop);
+ return ftrace_modify_code(FTRACE_GRAPH_CALL_IP, INSN_NOP);
}
-#endif /* !CONFIG_DYNAMIC_FTRACE */
+#endif /* CONFIG_DYNAMIC_FTRACE */
#ifndef KBUILD_MCOUNT_RA_ADDRESS
+
#define S_RA_SP (0xafbf << 16) /* s{d,w} ra, offset(sp) */
#define S_R_SP (0xafb0 << 16) /* s{d,w} R, offset(sp) */
#define OFFSET_MASK 0xffff /* stack offset range: 0 ~ PT_SIZE */
@@ -162,17 +199,17 @@ unsigned long ftrace_get_parent_addr(unsigned long self_addr,
unsigned int code;
int faulted;
- /* in module or kernel? */
- if (self_addr & 0x40000000) {
- /* module: move to the instruction "lui v1, HI_16BIT_OF_MCOUNT" */
- ip = self_addr - 20;
- } else {
- /* kernel: move to the instruction "move ra, at" */
- ip = self_addr - 12;
- }
+ /*
+ * For module, move the ip from calling site of mcount to the
+ * instruction "lui v1, hi_16bit_of_mcount"(offset is 20), but for
+ * kernel, move to the instruction "move ra, at"(offset is 12)
+ */
+ ip = self_addr - (in_module(self_addr) ? 20 : 12);
- /* search the text until finding the non-store instruction or "s{d,w}
- * ra, offset(sp)" instruction */
+ /*
+ * search the text until finding the non-store instruction or "s{d,w}
+ * ra, offset(sp)" instruction
+ */
do {
ip -= 4;
@@ -181,10 +218,11 @@ unsigned long ftrace_get_parent_addr(unsigned long self_addr,
if (unlikely(faulted))
return 0;
-
- /* If we hit the non-store instruction before finding where the
+ /*
+ * If we hit the non-store instruction before finding where the
* ra is stored, then this is a leaf function and it does not
- * store the ra on the stack. */
+ * store the ra on the stack
+ */
if ((code & S_R_SP) != S_R_SP)
return parent_addr;
@@ -202,7 +240,7 @@ unsigned long ftrace_get_parent_addr(unsigned long self_addr,
return 0;
}
-#endif
+#endif /* !KBUILD_MCOUNT_RA_ADDRESS */
/*
* Hook the return address and push it in the stack of return addrs
@@ -220,7 +258,8 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
if (unlikely(atomic_read(&current->tracing_graph_pause)))
return;
- /* "parent" is the stack address saved the return address of the caller
+ /*
+ * "parent" is the stack address saved the return address of the caller
* of _mcount.
*
* if the gcc < 4.5, a leaf function does not save the return address
@@ -242,10 +281,11 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
goto out;
#ifndef KBUILD_MCOUNT_RA_ADDRESS
parent = (unsigned long *)ftrace_get_parent_addr(self_addr, old,
- (unsigned long)parent,
- fp);
- /* If fails when getting the stack address of the non-leaf function's
- * ra, stop function graph tracer and return */
+ (unsigned long)parent, fp);
+ /*
+ * If fails when getting the stack address of the non-leaf function's
+ * ra, stop function graph tracer and return
+ */
if (parent == 0)
goto out;
#endif
@@ -272,4 +312,4 @@ out:
ftrace_graph_stop();
WARN_ON(1);
}
-#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S
index 6851fc97a511..6bfcb7a00ec6 100644
--- a/arch/mips/kernel/mcount.S
+++ b/arch/mips/kernel/mcount.S
@@ -6,6 +6,7 @@
* more details.
*
* Copyright (C) 2009 Lemote Inc. & DSLab, Lanzhou University, China
+ * Copyright (C) 2010 DSLab, Lanzhou University, China
* Author: Wu Zhangjin <wuzhangjin@gmail.com>
*/
@@ -45,8 +46,6 @@
PTR_L a5, PT_R9(sp)
PTR_L a6, PT_R10(sp)
PTR_L a7, PT_R11(sp)
-#endif
-#ifdef CONFIG_64BIT
PTR_ADDIU sp, PT_SIZE
#else
PTR_ADDIU sp, (PT_SIZE + 8)
@@ -58,6 +57,12 @@
move ra, AT
.endm
+/*
+ * The -mmcount-ra-address option of gcc 4.5 uses register $12 to pass
+ * the location of the parent's return address.
+ */
+#define MCOUNT_RA_ADDRESS_REG $12
+
#ifdef CONFIG_DYNAMIC_FTRACE
NESTED(ftrace_caller, PT_SIZE, ra)
@@ -71,14 +76,14 @@ _mcount:
MCOUNT_SAVE_REGS
#ifdef KBUILD_MCOUNT_RA_ADDRESS
- PTR_S t0, PT_R12(sp) /* t0 saved the location of the return address(at) by -mmcount-ra-address */
+ PTR_S MCOUNT_RA_ADDRESS_REG, PT_R12(sp)
#endif
- move a0, ra /* arg1: next ip, selfaddr */
+ move a0, ra /* arg1: self return address */
.globl ftrace_call
ftrace_call:
nop /* a placeholder for the call to a real tracing function */
- move a1, AT /* arg2: the caller's next ip, parent */
+ move a1, AT /* arg2: parent's return address */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
@@ -119,9 +124,9 @@ NESTED(_mcount, PT_SIZE, ra)
static_trace:
MCOUNT_SAVE_REGS
- move a0, ra /* arg1: next ip, selfaddr */
+ move a0, ra /* arg1: self return address */
jalr t2 /* (1) call *ftrace_trace_function */
- move a1, AT /* arg2: the caller's next ip, parent */
+ move a1, AT /* arg2: parent's return address */
MCOUNT_RESTORE_REGS
.globl ftrace_stub
@@ -134,28 +139,34 @@ ftrace_stub:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
NESTED(ftrace_graph_caller, PT_SIZE, ra)
-#ifdef CONFIG_DYNAMIC_FTRACE
- PTR_L a1, PT_R31(sp) /* load the original ra from the stack */
-#ifdef KBUILD_MCOUNT_RA_ADDRESS
- PTR_L t0, PT_R12(sp) /* load the original t0 from the stack */
-#endif
-#else
+#ifndef CONFIG_DYNAMIC_FTRACE
MCOUNT_SAVE_REGS
- move a1, ra /* arg2: next ip, selfaddr */
#endif
+ /* arg1: Get the location of the parent's return address */
#ifdef KBUILD_MCOUNT_RA_ADDRESS
- bnez t0, 1f /* non-leaf func: t0 saved the location of the return address */
+#ifdef CONFIG_DYNAMIC_FTRACE
+ PTR_L a0, PT_R12(sp)
+#else
+ move a0, MCOUNT_RA_ADDRESS_REG
+#endif
+ bnez a0, 1f /* non-leaf func: stored in MCOUNT_RA_ADDRESS_REG */
nop
- PTR_LA t0, PT_R1(sp) /* leaf func: get the location of at(old ra) from our own stack */
-1: move a0, t0 /* arg1: the location of the return address */
+#endif
+ PTR_LA a0, PT_R1(sp) /* leaf func: the location in current stack */
+1:
+
+ /* arg2: Get self return address */
+#ifdef CONFIG_DYNAMIC_FTRACE
+ PTR_L a1, PT_R31(sp)
#else
- PTR_LA a0, PT_R1(sp) /* arg1: &AT -> a0 */
+ move a1, ra
#endif
- jal prepare_ftrace_return
+
+ /* arg3: Get frame pointer of current stack */
#ifdef CONFIG_FRAME_POINTER
- move a2, fp /* arg3: frame pointer */
-#else
+ move a2, fp
+#else /* ! CONFIG_FRAME_POINTER */
#ifdef CONFIG_64BIT
PTR_LA a2, PT_SIZE(sp)
#else
@@ -163,6 +174,8 @@ NESTED(ftrace_graph_caller, PT_SIZE, ra)
#endif
#endif
+ jal prepare_ftrace_return
+ nop
MCOUNT_RESTORE_REGS
RETURN_BACK
END(ftrace_graph_caller)
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
index f5981c499109..2340f11dc29c 100644
--- a/arch/mips/kernel/mips-mt-fpaff.c
+++ b/arch/mips/kernel/mips-mt-fpaff.c
@@ -3,6 +3,7 @@
* Copyright (C) 2005 Mips Technologies, Inc
*/
#include <linux/cpu.h>
+#include <linux/cpuset.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/kernel.h>
@@ -39,6 +40,21 @@ static inline struct task_struct *find_process_by_pid(pid_t pid)
return pid ? find_task_by_vpid(pid) : current;
}
+/*
+ * check the target process has a UID that matches the current process's
+ */
+static bool check_same_owner(struct task_struct *p)
+{
+ const struct cred *cred = current_cred(), *pcred;
+ bool match;
+
+ rcu_read_lock();
+ pcred = __task_cred(p);
+ match = (cred->euid == pcred->euid ||
+ cred->euid == pcred->uid);
+ rcu_read_unlock();
+ return match;
+}
/*
* mipsmt_sys_sched_setaffinity - set the cpu affinity of a process
@@ -46,12 +62,10 @@ static inline struct task_struct *find_process_by_pid(pid_t pid)
asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
unsigned long __user *user_mask_ptr)
{
- cpumask_t new_mask;
- cpumask_t effective_mask;
- int retval;
- struct task_struct *p;
+ cpumask_var_t cpus_allowed, new_mask, effective_mask;
struct thread_info *ti;
- uid_t euid;
+ struct task_struct *p;
+ int retval;
if (len < sizeof(new_mask))
return -EINVAL;
@@ -60,53 +74,74 @@ asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
return -EFAULT;
get_online_cpus();
- read_lock(&tasklist_lock);
+ rcu_read_lock();
p = find_process_by_pid(pid);
if (!p) {
- read_unlock(&tasklist_lock);
+ rcu_read_unlock();
put_online_cpus();
return -ESRCH;
}
- /*
- * It is not safe to call set_cpus_allowed with the
- * tasklist_lock held. We will bump the task_struct's
- * usage count and drop tasklist_lock before invoking
- * set_cpus_allowed.
- */
+ /* Prevent p going away */
get_task_struct(p);
+ rcu_read_unlock();
- euid = current_euid();
+ if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
+ retval = -ENOMEM;
+ goto out_put_task;
+ }
+ if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
+ retval = -ENOMEM;
+ goto out_free_cpus_allowed;
+ }
+ if (!alloc_cpumask_var(&effective_mask, GFP_KERNEL)) {
+ retval = -ENOMEM;
+ goto out_free_new_mask;
+ }
retval = -EPERM;
- if (euid != p->cred->euid && euid != p->cred->uid &&
- !capable(CAP_SYS_NICE)) {
- read_unlock(&tasklist_lock);
+ if (!check_same_owner(p) && !capable(CAP_SYS_NICE))
goto out_unlock;
- }
retval = security_task_setscheduler(p, 0, NULL);
if (retval)
goto out_unlock;
/* Record new user-specified CPU set for future reference */
- p->thread.user_cpus_allowed = new_mask;
-
- /* Unlock the task list */
- read_unlock(&tasklist_lock);
+ cpumask_copy(&p->thread.user_cpus_allowed, new_mask);
+ again:
/* Compute new global allowed CPU set if necessary */
ti = task_thread_info(p);
if (test_ti_thread_flag(ti, TIF_FPUBOUND) &&
- cpus_intersects(new_mask, mt_fpu_cpumask)) {
- cpus_and(effective_mask, new_mask, mt_fpu_cpumask);
- retval = set_cpus_allowed_ptr(p, &effective_mask);
+ cpus_intersects(*new_mask, mt_fpu_cpumask)) {
+ cpus_and(*effective_mask, *new_mask, mt_fpu_cpumask);
+ retval = set_cpus_allowed_ptr(p, effective_mask);
} else {
+ cpumask_copy(effective_mask, new_mask);
clear_ti_thread_flag(ti, TIF_FPUBOUND);
- retval = set_cpus_allowed_ptr(p, &new_mask);
+ retval = set_cpus_allowed_ptr(p, new_mask);
}
+ if (!retval) {
+ cpuset_cpus_allowed(p, cpus_allowed);
+ if (!cpumask_subset(effective_mask, cpus_allowed)) {
+ /*
+ * We must have raced with a concurrent cpuset
+ * update. Just reset the cpus_allowed to the
+ * cpuset's cpus_allowed
+ */
+ cpumask_copy(new_mask, cpus_allowed);
+ goto again;
+ }
+ }
out_unlock:
+ free_cpumask_var(effective_mask);
+out_free_new_mask:
+ free_cpumask_var(new_mask);
+out_free_cpus_allowed:
+ free_cpumask_var(cpus_allowed);
+out_put_task:
put_task_struct(p);
put_online_cpus();
return retval;
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 8bdd6a663c7f..852780868fb4 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -976,7 +976,7 @@ asmlinkage void do_cpu(struct pt_regs *regs)
case 2:
raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs);
- break;
+ return;
case 3:
break;
diff --git a/arch/mips/loongson/Kconfig b/arch/mips/loongson/Kconfig
index 3df1967dea08..c97ca69b94e0 100644
--- a/arch/mips/loongson/Kconfig
+++ b/arch/mips/loongson/Kconfig
@@ -23,6 +23,7 @@ config LEMOTE_FULOONG2E
select GENERIC_HARDIRQS_NO__DO_IRQ
select GENERIC_ISA_DMA_SUPPORT_BROKEN
select CPU_HAS_WB
+ select LOONGSON_MC146818
help
Lemote Fuloong(2e) mini-PC board based on the Chinese Loongson-2E CPU and
an FPGA northbridge
@@ -51,6 +52,7 @@ config LEMOTE_MACH2F
select SYS_SUPPORTS_64BIT_KERNEL
select SYS_SUPPORTS_HIGHMEM
select SYS_SUPPORTS_LITTLE_ENDIAN
+ select LOONGSON_MC146818
help
Lemote Loongson 2F family machines utilize the 2F revision of
Loongson processor and the AMD CS5536 south bridge.
@@ -83,3 +85,7 @@ config LOONGSON_UART_BASE
bool
default y
depends on EARLY_PRINTK || SERIAL_8250
+
+config LOONGSON_MC146818
+ bool
+ default n
diff --git a/arch/mips/loongson/common/Makefile b/arch/mips/loongson/common/Makefile
index cdd2e812ba1a..e526488df655 100644
--- a/arch/mips/loongson/common/Makefile
+++ b/arch/mips/loongson/common/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_GENERIC_GPIO) += gpio.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_SERIAL_8250) += serial.o
obj-$(CONFIG_LOONGSON_UART_BASE) += uart_base.o
+obj-$(CONFIG_LOONGSON_MC146818) += rtc.o
#
# Enable CS5536 Virtual Support Module(VSM) to virtulize the PCI configure
diff --git a/arch/mips/loongson/common/cs5536/cs5536_ehci.c b/arch/mips/loongson/common/cs5536/cs5536_ehci.c
index eaf8b86e3318..5b5cbba699b3 100644
--- a/arch/mips/loongson/common/cs5536/cs5536_ehci.c
+++ b/arch/mips/loongson/common/cs5536/cs5536_ehci.c
@@ -49,6 +49,8 @@ void pci_ehci_write_reg(int reg, u32 value)
lo |= SOFT_BAR_EHCI_FLAG;
_wrmsr(GLCP_MSR_REG(GLCP_SOFT_COM), hi, lo);
} else if ((value & 0x01) == 0x00) {
+ _rdmsr(USB_MSR_REG(USB_EHCI), &hi, &lo);
+ lo = value;
_wrmsr(USB_MSR_REG(USB_EHCI), hi, lo);
value &= 0xfffffff0;
diff --git a/arch/mips/loongson/common/cs5536/cs5536_ide.c b/arch/mips/loongson/common/cs5536/cs5536_ide.c
index 9a96b5664c78..681d1291a2c7 100644
--- a/arch/mips/loongson/common/cs5536/cs5536_ide.c
+++ b/arch/mips/loongson/common/cs5536/cs5536_ide.c
@@ -51,6 +51,7 @@ void pci_ide_write_reg(int reg, u32 value)
lo |= SOFT_BAR_IDE_FLAG;
_wrmsr(GLCP_MSR_REG(GLCP_SOFT_COM), hi, lo);
} else if (value & 0x01) {
+ _rdmsr(IDE_MSR_REG(IDE_IO_BAR), &hi, &lo);
lo = (value & 0xfffffff0) | 0x1;
_wrmsr(IDE_MSR_REG(IDE_IO_BAR), hi, lo);
@@ -65,19 +66,30 @@ void pci_ide_write_reg(int reg, u32 value)
_rdmsr(DIVIL_MSR_REG(DIVIL_BALL_OPTS), &hi, &lo);
lo |= 0x01;
_wrmsr(DIVIL_MSR_REG(DIVIL_BALL_OPTS), hi, lo);
- } else
+ } else {
+ _rdmsr(IDE_MSR_REG(IDE_CFG), &hi, &lo);
+ lo = value;
_wrmsr(IDE_MSR_REG(IDE_CFG), hi, lo);
+ }
break;
case PCI_IDE_DTC_REG:
+ _rdmsr(IDE_MSR_REG(IDE_DTC), &hi, &lo);
+ lo = value;
_wrmsr(IDE_MSR_REG(IDE_DTC), hi, lo);
break;
case PCI_IDE_CAST_REG:
+ _rdmsr(IDE_MSR_REG(IDE_CAST), &hi, &lo);
+ lo = value;
_wrmsr(IDE_MSR_REG(IDE_CAST), hi, lo);
break;
case PCI_IDE_ETC_REG:
+ _rdmsr(IDE_MSR_REG(IDE_ETC), &hi, &lo);
+ lo = value;
_wrmsr(IDE_MSR_REG(IDE_ETC), hi, lo);
break;
case PCI_IDE_PM_REG:
+ _rdmsr(IDE_MSR_REG(IDE_INTERNAL_PM), &hi, &lo);
+ lo = value;
_wrmsr(IDE_MSR_REG(IDE_INTERNAL_PM), hi, lo);
break;
default:
@@ -167,6 +179,7 @@ u32 pci_ide_read_reg(int reg)
case PCI_IDE_ETC_REG:
_rdmsr(IDE_MSR_REG(IDE_ETC), &hi, &lo);
conf_data = lo;
+ break;
case PCI_IDE_PM_REG:
_rdmsr(IDE_MSR_REG(IDE_INTERNAL_PM), &hi, &lo);
conf_data = lo;
diff --git a/arch/mips/loongson/common/cs5536/cs5536_isa.c b/arch/mips/loongson/common/cs5536/cs5536_isa.c
index f5c0818831b2..4d9f65abeaff 100644
--- a/arch/mips/loongson/common/cs5536/cs5536_isa.c
+++ b/arch/mips/loongson/common/cs5536/cs5536_isa.c
@@ -61,7 +61,7 @@ static void divil_lbar_enable(void)
for (offset = DIVIL_LBAR_SMB; offset <= DIVIL_LBAR_PMS; offset++) {
_rdmsr(DIVIL_MSR_REG(offset), &hi, &lo);
hi |= 0x01;
- _wrmsr(DIVIL_MSR_REG(DIVIL_LBAR_SMB), hi, lo);
+ _wrmsr(DIVIL_MSR_REG(offset), hi, lo);
}
}
@@ -76,7 +76,7 @@ static void divil_lbar_disable(void)
for (offset = DIVIL_LBAR_SMB; offset <= DIVIL_LBAR_PMS; offset++) {
_rdmsr(DIVIL_MSR_REG(offset), &hi, &lo);
hi &= ~0x01;
- _wrmsr(DIVIL_MSR_REG(DIVIL_LBAR_SMB), hi, lo);
+ _wrmsr(DIVIL_MSR_REG(offset), hi, lo);
}
}
diff --git a/arch/mips/loongson/common/cs5536/cs5536_ohci.c b/arch/mips/loongson/common/cs5536/cs5536_ohci.c
index db5900aadd6b..bdedf512baf7 100644
--- a/arch/mips/loongson/common/cs5536/cs5536_ohci.c
+++ b/arch/mips/loongson/common/cs5536/cs5536_ohci.c
@@ -49,6 +49,8 @@ void pci_ohci_write_reg(int reg, u32 value)
lo |= SOFT_BAR_OHCI_FLAG;
_wrmsr(GLCP_MSR_REG(GLCP_SOFT_COM), hi, lo);
} else if ((value & 0x01) == 0x00) {
+ _rdmsr(USB_MSR_REG(USB_OHCI), &hi, &lo);
+ lo = value;
_wrmsr(USB_MSR_REG(USB_OHCI), hi, lo);
value &= 0xfffffff0;
diff --git a/arch/mips/loongson/common/rtc.c b/arch/mips/loongson/common/rtc.c
new file mode 100644
index 000000000000..a90d87c01555
--- /dev/null
+++ b/arch/mips/loongson/common/rtc.c
@@ -0,0 +1,43 @@
+/*
+ * Lemote Fuloong platform support
+ *
+ * Copyright(c) 2010 Arnaud Patard <apatard@mandriva.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/mc146818rtc.h>
+
+struct resource loongson_rtc_resources[] = {
+ {
+ .start = RTC_PORT(0),
+ .end = RTC_PORT(1),
+ .flags = IORESOURCE_IO,
+ }, {
+ .start = RTC_IRQ,
+ .end = RTC_IRQ,
+ .flags = IORESOURCE_IRQ,
+ }
+};
+
+static struct platform_device loongson_rtc_device = {
+ .name = "rtc_cmos",
+ .id = -1,
+ .resource = loongson_rtc_resources,
+ .num_resources = ARRAY_SIZE(loongson_rtc_resources),
+};
+
+
+static int __init loongson_rtc_platform_init(void)
+{
+ platform_device_register(&loongson_rtc_device);
+ return 0;
+}
+
+device_initcall(loongson_rtc_platform_init);
diff --git a/arch/mips/math-emu/dp_simple.c b/arch/mips/math-emu/dp_simple.c
index d9ae1dbabda7..b90974246e5b 100644
--- a/arch/mips/math-emu/dp_simple.c
+++ b/arch/mips/math-emu/dp_simple.c
@@ -78,6 +78,7 @@ ieee754dp ieee754dp_abs(ieee754dp x)
DPSIGN(x) = 0;
if (xc == IEEE754_CLASS_SNAN) {
+ SETCX(IEEE754_INVALID_OPERATION);
return ieee754dp_nanxcpt(ieee754dp_indef(), "abs");
}
diff --git a/arch/mips/math-emu/sp_simple.c b/arch/mips/math-emu/sp_simple.c
index 3175477d36f6..2fd53c920e99 100644
--- a/arch/mips/math-emu/sp_simple.c
+++ b/arch/mips/math-emu/sp_simple.c
@@ -78,6 +78,7 @@ ieee754sp ieee754sp_abs(ieee754sp x)
SPSIGN(x) = 0;
if (xc == IEEE754_CLASS_SNAN) {
+ SETCX(IEEE754_INVALID_OPERATION);
return ieee754sp_nanxcpt(ieee754sp_indef(), "abs");
}
diff --git a/arch/mips/oprofile/op_model_loongson2.c b/arch/mips/oprofile/op_model_loongson2.c
index d0d24e047676..60d3ea602118 100644
--- a/arch/mips/oprofile/op_model_loongson2.c
+++ b/arch/mips/oprofile/op_model_loongson2.c
@@ -43,6 +43,12 @@ static struct loongson2_register_config {
static char *oprofid = "LoongsonPerf";
static irqreturn_t loongson2_perfcount_handler(int irq, void *dev_id);
+static void reset_counters(void *arg)
+{
+ write_c0_perfctrl(0);
+ write_c0_perfcnt(0);
+}
+
static void loongson2_reg_setup(struct op_counter_config *cfg)
{
unsigned int ctrl = 0;
@@ -139,7 +145,7 @@ static int __init loongson2_init(void)
static void loongson2_exit(void)
{
- write_c0_perfctrl(0);
+ reset_counters(NULL);
free_irq(LOONGSON2_PERFCNT_IRQ, oprofid);
}
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 328774bd41ee..6506bf4fbff1 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -400,7 +400,7 @@ config IRQ_ALL_CPUS
config SPARSE_IRQ
bool "Support sparse irq numbering"
- default y
+ default n
help
This enables support for sparse irqs. This is useful for distro
kernels that want to define a high CONFIG_NR_CPUS value but still
@@ -409,7 +409,7 @@ config SPARSE_IRQ
( Sparse IRQs can also be beneficial on NUMA boxes, as they spread
out the irq_desc[] array in a more NUMA-friendly way. )
- If you don't know what to do here, say Y.
+ If you don't know what to do here, say N.
config NUMA
bool "NUMA support"
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 42dcd3f4ad7b..77cfe7a29e25 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -92,10 +92,10 @@ endif
else
KBUILD_CFLAGS += $(call cc-option,-mtune=power4)
endif
-else
-LDFLAGS_MODULE += arch/powerpc/lib/crtsavres.o
endif
+LDFLAGS_MODULE += arch/powerpc/lib/crtsavres.o
+
ifeq ($(CONFIG_TUNE_CELL),y)
KBUILD_CFLAGS += $(call cc-option,-mtune=cell)
endif
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
index 5d8be0416227..0175a676b34b 100644
--- a/arch/powerpc/include/asm/ptrace.h
+++ b/arch/powerpc/include/asm/ptrace.h
@@ -24,11 +24,7 @@
* 2 of the License, or (at your option) any later version.
*/
-#ifdef __KERNEL__
#include <linux/types.h>
-#else
-#include <stdint.h>
-#endif
#ifndef __ASSEMBLY__
@@ -364,13 +360,13 @@ static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
#ifndef __ASSEMBLY__
struct ppc_debug_info {
- uint32_t version; /* Only version 1 exists to date */
- uint32_t num_instruction_bps;
- uint32_t num_data_bps;
- uint32_t num_condition_regs;
- uint32_t data_bp_alignment;
- uint32_t sizeof_condition; /* size of the DVC register */
- uint64_t features;
+ __u32 version; /* Only version 1 exists to date */
+ __u32 num_instruction_bps;
+ __u32 num_data_bps;
+ __u32 num_condition_regs;
+ __u32 data_bp_alignment;
+ __u32 sizeof_condition; /* size of the DVC register */
+ __u64 features;
};
#endif /* __ASSEMBLY__ */
@@ -386,13 +382,13 @@ struct ppc_debug_info {
#ifndef __ASSEMBLY__
struct ppc_hw_breakpoint {
- uint32_t version; /* currently, version must be 1 */
- uint32_t trigger_type; /* only some combinations allowed */
- uint32_t addr_mode; /* address match mode */
- uint32_t condition_mode; /* break/watchpoint condition flags */
- uint64_t addr; /* break/watchpoint address */
- uint64_t addr2; /* range end or mask */
- uint64_t condition_value; /* contents of the DVC register */
+ __u32 version; /* currently, version must be 1 */
+ __u32 trigger_type; /* only some combinations allowed */
+ __u32 addr_mode; /* address match mode */
+ __u32 condition_mode; /* break/watchpoint condition flags */
+ __u64 addr; /* break/watchpoint address */
+ __u64 addr2; /* range end or mask */
+ __u64 condition_value; /* contents of the DVC register */
};
#endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index b46f2e09bd81..29df48f2b61a 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -447,7 +447,7 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
crash_kexec_prepare_cpus(crashing_cpu);
cpu_set(crashing_cpu, cpus_in_crash);
crash_kexec_stop_spus();
-#ifdef CONFIG_PPC_STD_MMU_64
+#if defined(CONFIG_PPC_STD_MMU_64) && defined(CONFIG_SMP)
crash_kexec_wait_realmode(crashing_cpu);
#endif
if (ppc_md.kexec_cpu_down)
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 3333bbdd23ef..77be3d058a65 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -295,7 +295,10 @@ void fixup_irqs(const struct cpumask *map)
for_each_irq(irq) {
desc = irq_to_desc(irq);
- if (desc && desc->status & IRQ_PER_CPU)
+ if (!desc)
+ continue;
+
+ if (desc->status & IRQ_PER_CPU)
continue;
cpumask_and(mask, desc->affinity, map);
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index 43b83c35cf54..5c14ffe51258 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -791,8 +791,11 @@ static void power_pmu_disable(struct perf_event *event)
cpuhw = &__get_cpu_var(cpu_hw_events);
for (i = 0; i < cpuhw->n_events; ++i) {
if (event == cpuhw->event[i]) {
- while (++i < cpuhw->n_events)
+ while (++i < cpuhw->n_events) {
cpuhw->event[i-1] = cpuhw->event[i];
+ cpuhw->events[i-1] = cpuhw->events[i];
+ cpuhw->flags[i-1] = cpuhw->flags[i];
+ }
--cpuhw->n_events;
ppmu->disable_pmc(event->hw.idx - 1, cpuhw->mmcr);
if (event->hw.idx) {
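
The power_pmu_disable() hunk above keeps the two arrays that shadow cpuhw->event[] (cpuhw->events[] and cpuhw->flags[]) compacted in step when an entry is removed from the middle. A minimal user-space sketch of the same parallel-array compaction, with made-up names rather than the kernel structures, might look like:

	#include <stdio.h>

	/* Remove entry `idx` from three parallel arrays of length n by
	 * shifting the tails left together; returns the new length.
	 * The array names are illustrative only. */
	static int remove_entry(int idx, int n, int ev[], int cfg[], int flg[])
	{
		for (int i = idx + 1; i < n; ++i) {
			ev[i - 1]  = ev[i];
			cfg[i - 1] = cfg[i];
			flg[i - 1] = flg[i];
		}
		return n - 1;
	}

	int main(void)
	{
		int ev[]  = { 10, 11, 12, 13 };
		int cfg[] = { 20, 21, 22, 23 };
		int flg[] = { 30, 31, 32, 33 };
		int n = remove_entry(1, 4, ev, cfg, flg);

		for (int i = 0; i < n; ++i)
			printf("%d %d %d\n", ev[i], cfg[i], flg[i]);
		return 0;
	}

Shifting only one of the arrays, as the old code did, leaves the others pointing at stale entries, which is exactly what the added two lines prevent.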
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 97d4bd9442d3..3b6f8ae9b8cc 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -872,7 +872,7 @@ static void __init prom_send_capabilities(void)
"ibm_architecture_vec structure inconsistent: 0x%x !\n",
*cores);
} else {
- *cores = NR_CPUS / prom_count_smt_threads();
+ *cores = DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads());
prom_printf("Max number of cores passed to firmware: 0x%x\n",
(unsigned long)*cores);
}
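
The prom_init.c change replaces a truncating division with DIV_ROUND_UP() so a configuration where NR_CPUS is smaller than the SMT thread count still reports at least one core to firmware. An illustrative stand-alone version of the idiom (the macro body mirrors the usual kernel definition):

	#include <stdio.h>

	/* Same rounding-up idiom as the kernel's DIV_ROUND_UP() macro. */
	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		int nr_cpus = 4, threads_per_core = 8;

		/* Plain division reports 0 cores; rounding up reports 1. */
		printf("truncated:  %d\n", nr_cpus / threads_per_core);
		printf("rounded up: %d\n", DIV_ROUND_UP(nr_cpus, threads_per_core));
		return 0;
	}
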
diff --git a/arch/powerpc/kernel/prom_init_check.sh b/arch/powerpc/kernel/prom_init_check.sh
index 1ac136b128f0..9f82f4937892 100644
--- a/arch/powerpc/kernel/prom_init_check.sh
+++ b/arch/powerpc/kernel/prom_init_check.sh
@@ -52,12 +52,18 @@ do
if [ "${UNDEF:0:9}" = "_restgpr_" ]; then
OK=1
fi
+ if [ "${UNDEF:0:10}" = "_restgpr0_" ]; then
+ OK=1
+ fi
if [ "${UNDEF:0:11}" = "_rest32gpr_" ]; then
OK=1
fi
if [ "${UNDEF:0:9}" = "_savegpr_" ]; then
OK=1
fi
+ if [ "${UNDEF:0:10}" = "_savegpr0_" ]; then
+ OK=1
+ fi
if [ "${UNDEF:0:11}" = "_save32gpr_" ]; then
OK=1
fi
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index 3040dac18a37..111da1c03a11 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -12,8 +12,8 @@ CFLAGS_REMOVE_code-patching.o = -pg
CFLAGS_REMOVE_feature-fixups.o = -pg
obj-y := string.o alloc.o \
- checksum_$(CONFIG_WORD_SIZE).o
-obj-$(CONFIG_PPC32) += div64.o copy_32.o crtsavres.o
+ checksum_$(CONFIG_WORD_SIZE).o crtsavres.o
+obj-$(CONFIG_PPC32) += div64.o copy_32.o
obj-$(CONFIG_HAS_IOMEM) += devres.o
obj-$(CONFIG_PPC64) += copypage_64.o copyuser_64.o \
diff --git a/arch/powerpc/lib/crtsavres.S b/arch/powerpc/lib/crtsavres.S
index 70a9cd8a3008..1c893f05d224 100644
--- a/arch/powerpc/lib/crtsavres.S
+++ b/arch/powerpc/lib/crtsavres.S
@@ -6,6 +6,7 @@
* Written By Michael Meissner
*
* Based on gcc/config/rs6000/crtsavres.asm from gcc
+ * 64 bit additions from reading the PPC elf64abi document.
*
* This file is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -44,6 +45,8 @@
#ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
+#ifndef CONFIG_PPC64
+
/* Routines for saving integer registers, called by the compiler. */
/* Called with r11 pointing to the stack header word of the caller of the */
/* function, just beyond the end of the integer save area. */
@@ -226,4 +229,130 @@ _GLOBAL(_rest32gpr_31_x)
mtlr 0
mr 1,11
blr
+
+#else /* CONFIG_PPC64 */
+
+.globl _savegpr0_14
+_savegpr0_14:
+ std r14,-144(r1)
+.globl _savegpr0_15
+_savegpr0_15:
+ std r15,-136(r1)
+.globl _savegpr0_16
+_savegpr0_16:
+ std r16,-128(r1)
+.globl _savegpr0_17
+_savegpr0_17:
+ std r17,-120(r1)
+.globl _savegpr0_18
+_savegpr0_18:
+ std r18,-112(r1)
+.globl _savegpr0_19
+_savegpr0_19:
+ std r19,-104(r1)
+.globl _savegpr0_20
+_savegpr0_20:
+ std r20,-96(r1)
+.globl _savegpr0_21
+_savegpr0_21:
+ std r21,-88(r1)
+.globl _savegpr0_22
+_savegpr0_22:
+ std r22,-80(r1)
+.globl _savegpr0_23
+_savegpr0_23:
+ std r23,-72(r1)
+.globl _savegpr0_24
+_savegpr0_24:
+ std r24,-64(r1)
+.globl _savegpr0_25
+_savegpr0_25:
+ std r25,-56(r1)
+.globl _savegpr0_26
+_savegpr0_26:
+ std r26,-48(r1)
+.globl _savegpr0_27
+_savegpr0_27:
+ std r27,-40(r1)
+.globl _savegpr0_28
+_savegpr0_28:
+ std r28,-32(r1)
+.globl _savegpr0_29
+_savegpr0_29:
+ std r29,-24(r1)
+.globl _savegpr0_30
+_savegpr0_30:
+ std r30,-16(r1)
+.globl _savegpr0_31
+_savegpr0_31:
+ std r31,-8(r1)
+ std r0,16(r1)
+ blr
+
+.globl _restgpr0_14
+_restgpr0_14:
+ ld r14,-144(r1)
+.globl _restgpr0_15
+_restgpr0_15:
+ ld r15,-136(r1)
+.globl _restgpr0_16
+_restgpr0_16:
+ ld r16,-128(r1)
+.globl _restgpr0_17
+_restgpr0_17:
+ ld r17,-120(r1)
+.globl _restgpr0_18
+_restgpr0_18:
+ ld r18,-112(r1)
+.globl _restgpr0_19
+_restgpr0_19:
+ ld r19,-104(r1)
+.globl _restgpr0_20
+_restgpr0_20:
+ ld r20,-96(r1)
+.globl _restgpr0_21
+_restgpr0_21:
+ ld r21,-88(r1)
+.globl _restgpr0_22
+_restgpr0_22:
+ ld r22,-80(r1)
+.globl _restgpr0_23
+_restgpr0_23:
+ ld r23,-72(r1)
+.globl _restgpr0_24
+_restgpr0_24:
+ ld r24,-64(r1)
+.globl _restgpr0_25
+_restgpr0_25:
+ ld r25,-56(r1)
+.globl _restgpr0_26
+_restgpr0_26:
+ ld r26,-48(r1)
+.globl _restgpr0_27
+_restgpr0_27:
+ ld r27,-40(r1)
+.globl _restgpr0_28
+_restgpr0_28:
+ ld r28,-32(r1)
+.globl _restgpr0_29
+_restgpr0_29:
+ ld r0,16(r1)
+ ld r29,-24(r1)
+ mtlr r0
+ ld r30,-16(r1)
+ ld r31,-8(r1)
+ blr
+
+.globl _restgpr0_30
+_restgpr0_30:
+ ld r30,-16(r1)
+.globl _restgpr0_31
+_restgpr0_31:
+ ld r0,16(r1)
+ ld r31,-8(r1)
+ mtlr r0
+ blr
+
+#endif /* CONFIG_PPC64 */
+
#endif
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
index e640175b65ae..0d08d0171392 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -12,6 +12,7 @@
* 2 of the License, or (at your option) any later version.
*/
+#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
@@ -288,8 +289,8 @@ static void test_alternative_case_with_external_branch(void)
static void test_cpu_macros(void)
{
- extern void ftr_fixup_test_FTR_macros;
- extern void ftr_fixup_test_FTR_macros_expected;
+ extern u8 ftr_fixup_test_FTR_macros;
+ extern u8 ftr_fixup_test_FTR_macros_expected;
unsigned long size = &ftr_fixup_test_FTR_macros_expected -
&ftr_fixup_test_FTR_macros;
@@ -301,8 +302,8 @@ static void test_cpu_macros(void)
static void test_fw_macros(void)
{
#ifdef CONFIG_PPC64
- extern void ftr_fixup_test_FW_FTR_macros;
- extern void ftr_fixup_test_FW_FTR_macros_expected;
+ extern u8 ftr_fixup_test_FW_FTR_macros;
+ extern u8 ftr_fixup_test_FW_FTR_macros_expected;
unsigned long size = &ftr_fixup_test_FW_FTR_macros_expected -
&ftr_fixup_test_FW_FTR_macros;
@@ -314,10 +315,10 @@ static void test_fw_macros(void)
static void test_lwsync_macros(void)
{
- extern void lwsync_fixup_test;
- extern void end_lwsync_fixup_test;
- extern void lwsync_fixup_test_expected_LWSYNC;
- extern void lwsync_fixup_test_expected_SYNC;
+ extern u8 lwsync_fixup_test;
+ extern u8 end_lwsync_fixup_test;
+ extern u8 lwsync_fixup_test_expected_LWSYNC;
+ extern u8 lwsync_fixup_test_expected_SYNC;
unsigned long size = &end_lwsync_fixup_test -
&lwsync_fixup_test;
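
The feature-fixups.c hunk retypes the assembler-provided test markers from `extern void` to `extern u8`, so taking their addresses and subtracting them is ordinary byte arithmetic instead of relying on a GCC extension for void expressions. A small stand-alone illustration, where the local array stands in for the section emitted by the test macros:

	#include <stdio.h>

	/* Illustration only: in the kernel the start/end symbols come from
	 * the object file; here they are pointers into one local buffer. */
	static unsigned char section[64];

	int main(void)
	{
		unsigned char *start = &section[0];
		unsigned char *end   = &section[48];

		/* With a sized type (u8/unsigned char) the difference is a
		 * byte count; with `extern void` symbols the same subtraction
		 * is not valid ISO C. */
		unsigned long size = (unsigned long)(end - start);

		printf("fixup test section size: %lu bytes\n", size);
		return 0;
	}
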
diff --git a/arch/powerpc/platforms/iseries/pci.c b/arch/powerpc/platforms/iseries/pci.c
index 3fc2e6494b8b..ab3962b0d246 100644
--- a/arch/powerpc/platforms/iseries/pci.c
+++ b/arch/powerpc/platforms/iseries/pci.c
@@ -445,7 +445,11 @@ void __init iSeries_pcibios_fixup_resources(struct pci_dev *pdev)
}
allocate_device_bars(pdev);
- iseries_device_information(pdev, bus, *sub_bus);
+ if (likely(sub_bus))
+ iseries_device_information(pdev, bus, *sub_bus);
+ else
+ printk(KERN_ERR "PCI: Device node %s has missing or invalid "
+ "linux,subbus property\n", node->full_name);
}
/*
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index 0b96b5589f08..078d4ec1a9d9 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -110,7 +110,7 @@ int use_calgary __read_mostly = 0;
* x3950 (PCIE): 8 chassis, 32 PHBs per chassis = 256
* x3950 (PCIX): 8 chassis, 16 PHBs per chassis = 128
*/
-#define MAX_PHB_BUS_NUM 384
+#define MAX_PHB_BUS_NUM 256
#define PHBS_PER_CALGARY 4
@@ -1056,8 +1056,6 @@ static int __init calgary_init_one(struct pci_dev *dev)
struct iommu_table *tbl;
int ret;
- BUG_ON(dev->bus->number >= MAX_PHB_BUS_NUM);
-
bbar = busno_to_bbar(dev->bus->number);
ret = calgary_setup_tar(dev, bbar);
if (ret)
diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c
index f20eeec85a86..8acaddd0fb21 100644
--- a/arch/x86/mm/pat_rbtree.c
+++ b/arch/x86/mm/pat_rbtree.c
@@ -34,8 +34,7 @@
* memtype_lock protects the rbtree.
*/
-static void memtype_rb_augment_cb(struct rb_node *node);
-static struct rb_root memtype_rbroot = RB_AUGMENT_ROOT(&memtype_rb_augment_cb);
+static struct rb_root memtype_rbroot = RB_ROOT;
static int is_node_overlap(struct memtype *node, u64 start, u64 end)
{
@@ -56,7 +55,7 @@ static u64 get_subtree_max_end(struct rb_node *node)
}
/* Update 'subtree_max_end' for a node, based on node and its children */
-static void update_node_max_end(struct rb_node *node)
+static void memtype_rb_augment_cb(struct rb_node *node, void *__unused)
{
struct memtype *data;
u64 max_end, child_max_end;
@@ -78,25 +77,6 @@ static void update_node_max_end(struct rb_node *node)
data->subtree_max_end = max_end;
}
-/* Update 'subtree_max_end' for a node and all its ancestors */
-static void update_path_max_end(struct rb_node *node)
-{
- u64 old_max_end, new_max_end;
-
- while (node) {
- struct memtype *data = container_of(node, struct memtype, rb);
-
- old_max_end = data->subtree_max_end;
- update_node_max_end(node);
- new_max_end = data->subtree_max_end;
-
- if (new_max_end == old_max_end)
- break;
-
- node = rb_parent(node);
- }
-}
-
/* Find the first (lowest start addr) overlapping range from rb tree */
static struct memtype *memtype_rb_lowest_match(struct rb_root *root,
u64 start, u64 end)
@@ -190,12 +170,6 @@ failure:
return -EBUSY;
}
-static void memtype_rb_augment_cb(struct rb_node *node)
-{
- if (node)
- update_path_max_end(node);
-}
-
static void memtype_rb_insert(struct rb_root *root, struct memtype *newdata)
{
struct rb_node **node = &(root->rb_node);
@@ -213,6 +187,7 @@ static void memtype_rb_insert(struct rb_root *root, struct memtype *newdata)
rb_link_node(&newdata->rb, parent, node);
rb_insert_color(&newdata->rb, root);
+ rb_augment_insert(&newdata->rb, memtype_rb_augment_cb, NULL);
}
int rbt_memtype_check_insert(struct memtype *new, unsigned long *ret_type)
@@ -234,13 +209,16 @@ int rbt_memtype_check_insert(struct memtype *new, unsigned long *ret_type)
struct memtype *rbt_memtype_erase(u64 start, u64 end)
{
+ struct rb_node *deepest;
struct memtype *data;
data = memtype_rb_exact_match(&memtype_rbroot, start, end);
if (!data)
goto out;
+ deepest = rb_augment_erase_begin(&data->rb);
rb_erase(&data->rb, &memtype_rbroot);
+ rb_augment_erase_end(deepest, memtype_rb_augment_cb, NULL);
out:
return data;
}
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index 6f907ebed2d5..6d34f405a2f3 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -37,7 +37,7 @@
#include <linux/wait.h>
#include <linux/skbuff.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c
index 40aec0fb8596..42d69d4de05c 100644
--- a/drivers/bluetooth/hci_bcsp.c
+++ b/drivers/bluetooth/hci_bcsp.c
@@ -244,7 +244,7 @@ static struct sk_buff *bcsp_prepare_pkt(struct bcsp_struct *bcsp, u8 *data,
if (rel) {
hdr[0] |= 0x80 + bcsp->msgq_txseq;
BT_DBG("Sending packet with seqno %u", bcsp->msgq_txseq);
- bcsp->msgq_txseq = ++(bcsp->msgq_txseq) & 0x07;
+ bcsp->msgq_txseq = (bcsp->msgq_txseq + 1) & 0x07;
}
if (bcsp->use_crc)
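
The hci_bcsp.c change matters because `seq = ++seq & 0x07` modifies the variable twice without an intervening sequence point, which is undefined behaviour in C; the replacement derives the new value purely from the old one. A quick sketch of the well-defined modulo-8 sequence counter:

	#include <stdio.h>

	int main(void)
	{
		unsigned seq = 0;

		for (int i = 0; i < 10; ++i) {
			/* Well-defined: read the old value once, write once.
			 * The original `seq = ++seq & 0x07;` wrote seq twice
			 * between sequence points (undefined behaviour). */
			seq = (seq + 1) & 0x07;
			printf("%u ", seq);
		}
		printf("\n");
		return 0;
	}
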
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 1f2cc6b09623..719662034bbf 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -315,8 +315,9 @@ static void drm_fb_helper_on(struct fb_info *info)
struct drm_device *dev = fb_helper->dev;
struct drm_crtc *crtc;
struct drm_crtc_helper_funcs *crtc_funcs;
+ struct drm_connector *connector;
struct drm_encoder *encoder;
- int i;
+ int i, j;
/*
* For each CRTC in this fb, turn the crtc on then,
@@ -332,7 +333,14 @@ static void drm_fb_helper_on(struct fb_info *info)
crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
-
+ /* Walk the connectors & encoders on this fb turning them on */
+ for (j = 0; j < fb_helper->connector_count; j++) {
+ connector = fb_helper->connector_info[j]->connector;
+ connector->dpms = DRM_MODE_DPMS_ON;
+ drm_connector_property_set_value(connector,
+ dev->mode_config.dpms_property,
+ DRM_MODE_DPMS_ON);
+ }
/* Found a CRTC on this fb, now find encoders */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc == crtc) {
@@ -352,8 +360,9 @@ static void drm_fb_helper_off(struct fb_info *info, int dpms_mode)
struct drm_device *dev = fb_helper->dev;
struct drm_crtc *crtc;
struct drm_crtc_helper_funcs *crtc_funcs;
+ struct drm_connector *connector;
struct drm_encoder *encoder;
- int i;
+ int i, j;
/*
* For each CRTC in this fb, find all associated encoders
@@ -367,6 +376,14 @@ static void drm_fb_helper_off(struct fb_info *info, int dpms_mode)
if (!crtc->enabled)
continue;
+ /* Walk the connectors on this fb and mark them off */
+ for (j = 0; j < fb_helper->connector_count; j++) {
+ connector = fb_helper->connector_info[j]->connector;
+ connector->dpms = dpms_mode;
+ drm_connector_property_set_value(connector,
+ dev->mode_config.dpms_property,
+ dpms_mode);
+ }
/* Found a CRTC on this fb, now find encoders */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc == crtc) {
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 0c7ccc6961a3..f58f8bd8f77b 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -785,7 +785,9 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
if (connector == list_connector)
continue;
list_radeon_connector = to_radeon_connector(list_connector);
- if (radeon_connector->devices == list_radeon_connector->devices) {
+ if (list_radeon_connector->shared_ddc &&
+ (list_radeon_connector->ddc_bus->rec.i2c_id ==
+ radeon_connector->ddc_bus->rec.i2c_id)) {
if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
if (connector->connector_type == DRM_MODE_CONNECTOR_DVID) {
kfree(radeon_connector->edid);
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 2f047577b1e3..b1d67dc973dc 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -104,7 +104,6 @@ struct ttm_pool_opts {
struct ttm_pool_manager {
struct kobject kobj;
struct shrinker mm_shrink;
- atomic_t page_alloc_inited;
struct ttm_pool_opts options;
union {
@@ -142,7 +141,7 @@ static void ttm_pool_kobj_release(struct kobject *kobj)
{
struct ttm_pool_manager *m =
container_of(kobj, struct ttm_pool_manager, kobj);
- (void)m;
+ kfree(m);
}
static ssize_t ttm_pool_store(struct kobject *kobj,
@@ -214,9 +213,7 @@ static struct kobj_type ttm_pool_kobj_type = {
.default_attrs = ttm_pool_attrs,
};
-static struct ttm_pool_manager _manager = {
- .page_alloc_inited = ATOMIC_INIT(0)
-};
+static struct ttm_pool_manager *_manager;
#ifndef CONFIG_X86
static int set_pages_array_wb(struct page **pages, int addrinarray)
@@ -271,7 +268,7 @@ static struct ttm_page_pool *ttm_get_pool(int flags,
if (flags & TTM_PAGE_FLAG_DMA32)
pool_index |= 0x2;
- return &_manager.pools[pool_index];
+ return &_manager->pools[pool_index];
}
/* set memory back to wb and free the pages. */
@@ -387,7 +384,7 @@ static int ttm_pool_get_num_unused_pages(void)
unsigned i;
int total = 0;
for (i = 0; i < NUM_POOLS; ++i)
- total += _manager.pools[i].npages;
+ total += _manager->pools[i].npages;
return total;
}
@@ -408,7 +405,7 @@ static int ttm_pool_mm_shrink(int shrink_pages, gfp_t gfp_mask)
unsigned nr_free = shrink_pages;
if (shrink_pages == 0)
break;
- pool = &_manager.pools[(i + pool_offset)%NUM_POOLS];
+ pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
shrink_pages = ttm_page_pool_free(pool, nr_free);
}
/* return estimated number of unused pages in pool */
@@ -576,10 +573,10 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
/* If allocation request is small and there is not enough
* pages in pool we fill the pool first */
- if (count < _manager.options.small
+ if (count < _manager->options.small
&& count > pool->npages) {
struct list_head new_pages;
- unsigned alloc_size = _manager.options.alloc_size;
+ unsigned alloc_size = _manager->options.alloc_size;
/**
* Can't change page caching if in irqsave context. We have to
@@ -759,8 +756,8 @@ void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags,
pool->npages += page_count;
/* Check that we don't go over the pool limit */
page_count = 0;
- if (pool->npages > _manager.options.max_size) {
- page_count = pool->npages - _manager.options.max_size;
+ if (pool->npages > _manager->options.max_size) {
+ page_count = pool->npages - _manager->options.max_size;
/* free at least NUM_PAGES_TO_ALLOC number of pages
* to reduce calls to set_memory_wb */
if (page_count < NUM_PAGES_TO_ALLOC)
@@ -785,33 +782,36 @@ static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
{
int ret;
- if (atomic_add_return(1, &_manager.page_alloc_inited) > 1)
- return 0;
+
+ WARN_ON(_manager);
printk(KERN_INFO TTM_PFX "Initializing pool allocator.\n");
- ttm_page_pool_init_locked(&_manager.wc_pool, GFP_HIGHUSER, "wc");
+ _manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
- ttm_page_pool_init_locked(&_manager.uc_pool, GFP_HIGHUSER, "uc");
+ ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc");
- ttm_page_pool_init_locked(&_manager.wc_pool_dma32, GFP_USER | GFP_DMA32,
- "wc dma");
+ ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc");
- ttm_page_pool_init_locked(&_manager.uc_pool_dma32, GFP_USER | GFP_DMA32,
- "uc dma");
+ ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
+ GFP_USER | GFP_DMA32, "wc dma");
- _manager.options.max_size = max_pages;
- _manager.options.small = SMALL_ALLOCATION;
- _manager.options.alloc_size = NUM_PAGES_TO_ALLOC;
+ ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
+ GFP_USER | GFP_DMA32, "uc dma");
- kobject_init(&_manager.kobj, &ttm_pool_kobj_type);
- ret = kobject_add(&_manager.kobj, &glob->kobj, "pool");
+ _manager->options.max_size = max_pages;
+ _manager->options.small = SMALL_ALLOCATION;
+ _manager->options.alloc_size = NUM_PAGES_TO_ALLOC;
+
+ ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
+ &glob->kobj, "pool");
if (unlikely(ret != 0)) {
- kobject_put(&_manager.kobj);
+ kobject_put(&_manager->kobj);
+ _manager = NULL;
return ret;
}
- ttm_pool_mm_shrink_init(&_manager);
+ ttm_pool_mm_shrink_init(_manager);
return 0;
}
@@ -820,16 +820,14 @@ void ttm_page_alloc_fini()
{
int i;
- if (atomic_sub_return(1, &_manager.page_alloc_inited) > 0)
- return;
-
printk(KERN_INFO TTM_PFX "Finalizing pool allocator.\n");
- ttm_pool_mm_shrink_fini(&_manager);
+ ttm_pool_mm_shrink_fini(_manager);
for (i = 0; i < NUM_POOLS; ++i)
- ttm_page_pool_free(&_manager.pools[i], FREE_ALL_PAGES);
+ ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES);
- kobject_put(&_manager.kobj);
+ kobject_put(&_manager->kobj);
+ _manager = NULL;
}
int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
@@ -837,14 +835,14 @@ int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
struct ttm_page_pool *p;
unsigned i;
char *h[] = {"pool", "refills", "pages freed", "size"};
- if (atomic_read(&_manager.page_alloc_inited) == 0) {
+ if (!_manager) {
seq_printf(m, "No pool allocator running.\n");
return 0;
}
seq_printf(m, "%6s %12s %13s %8s\n",
h[0], h[1], h[2], h[3]);
for (i = 0; i < NUM_POOLS; ++i) {
- p = &_manager.pools[i];
+ p = &_manager->pools[i];
seq_printf(m, "%6s %12ld %13ld %8d\n",
p->name, p->nrefills,
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 40fdc41446cc..df483076eda6 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -340,7 +340,8 @@ static void rlb_update_entry_from_arp(struct bonding *bond, struct arp_pkt *arp)
if ((client_info->assigned) &&
(client_info->ip_src == arp->ip_dst) &&
- (client_info->ip_dst == arp->ip_src)) {
+ (client_info->ip_dst == arp->ip_src) &&
+ (compare_ether_addr_64bits(client_info->mac_dst, arp->mac_src))) {
/* update the clients MAC address */
memcpy(client_info->mac_dst, arp->mac_src, ETH_ALEN);
client_info->ntt = 1;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 5e12462a9d5e..c3d98dde2f86 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -168,7 +168,7 @@ static int arp_ip_count;
static int bond_mode = BOND_MODE_ROUNDROBIN;
static int xmit_hashtype = BOND_XMIT_POLICY_LAYER2;
static int lacp_fast;
-
+static int disable_netpoll = 1;
const struct bond_parm_tbl bond_lacp_tbl[] = {
{ "slow", AD_LACP_SLOW},
@@ -1742,15 +1742,23 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
bond_set_carrier(bond);
#ifdef CONFIG_NET_POLL_CONTROLLER
- if (slaves_support_netpoll(bond_dev)) {
- bond_dev->priv_flags &= ~IFF_DISABLE_NETPOLL;
- if (bond_dev->npinfo)
- slave_dev->npinfo = bond_dev->npinfo;
- } else if (!(bond_dev->priv_flags & IFF_DISABLE_NETPOLL)) {
+ /*
+ * Netpoll and bonding is broken, make sure it is not initialized
+ * until it is fixed.
+ */
+ if (disable_netpoll) {
bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
- pr_info("New slave device %s does not support netpoll\n",
- slave_dev->name);
- pr_info("Disabling netpoll support for %s\n", bond_dev->name);
+ } else {
+ if (slaves_support_netpoll(bond_dev)) {
+ bond_dev->priv_flags &= ~IFF_DISABLE_NETPOLL;
+ if (bond_dev->npinfo)
+ slave_dev->npinfo = bond_dev->npinfo;
+ } else if (!(bond_dev->priv_flags & IFF_DISABLE_NETPOLL)) {
+ bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
+ pr_info("New slave device %s does not support netpoll\n",
+ slave_dev->name);
+ pr_info("Disabling netpoll support for %s\n", bond_dev->name);
+ }
}
#endif
read_unlock(&bond->lock);
@@ -1950,8 +1958,11 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
#ifdef CONFIG_NET_POLL_CONTROLLER
read_lock_bh(&bond->lock);
- if (slaves_support_netpoll(bond_dev))
- bond_dev->priv_flags &= ~IFF_DISABLE_NETPOLL;
+
+ /* Make sure netpoll over stays disabled until fixed. */
+ if (!disable_netpoll)
+ if (slaves_support_netpoll(bond_dev))
+ bond_dev->priv_flags &= ~IFF_DISABLE_NETPOLL;
read_unlock_bh(&bond->lock);
if (slave_dev->netdev_ops->ndo_netpoll_cleanup)
slave_dev->netdev_ops->ndo_netpoll_cleanup(slave_dev);
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index ce30c62a97f7..7b5d9764f317 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -3684,10 +3684,6 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
/* signal that we are down to the interrupt handler */
set_bit(__IXGBE_DOWN, &adapter->state);
- /* power down the optics */
- if (hw->phy.multispeed_fiber)
- hw->mac.ops.disable_tx_laser(hw);
-
/* disable receive for all VFs and wait one second */
if (adapter->num_vfs) {
/* ping all the active vfs to let them know we are going down */
@@ -3742,6 +3738,10 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
(IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
~IXGBE_DMATXCTL_TE));
+ /* power down the optics */
+ if (hw->phy.multispeed_fiber)
+ hw->mac.ops.disable_tx_laser(hw);
+
/* clear n-tuple filters that are cached */
ethtool_ntuple_flush(netdev);
@@ -4001,7 +4001,7 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
done:
/* Notify the stack of the (possibly) reduced Tx Queue count. */
- adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
+ netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
}
static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
@@ -5195,7 +5195,6 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
ixgbe_free_all_tx_resources(adapter);
ixgbe_free_all_rx_resources(adapter);
}
- ixgbe_clear_interrupt_scheme(adapter);
#ifdef CONFIG_PM
retval = pci_save_state(pdev);
@@ -5230,6 +5229,8 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
*enable_wake = !!wufc;
+ ixgbe_clear_interrupt_scheme(adapter);
+
ixgbe_release_hw_control(adapter);
pci_disable_device(pdev);
@@ -6023,7 +6024,6 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
int queue, u32 tx_flags)
{
- /* Right now, we support IPv4 only */
struct ixgbe_atr_input atr_input;
struct tcphdr *th;
struct iphdr *iph = ip_hdr(skb);
@@ -6032,6 +6032,9 @@ static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
u32 src_ipv4_addr, dst_ipv4_addr;
u8 l4type = 0;
+ /* Right now, we support IPv4 only */
+ if (skb->protocol != htons(ETH_P_IP))
+ return;
/* check if we're UDP or TCP */
if (iph->protocol == IPPROTO_TCP) {
th = tcp_hdr(skb);
diff --git a/drivers/net/ll_temac_main.c b/drivers/net/ll_temac_main.c
index 52dcc8495647..6474c4973d3a 100644
--- a/drivers/net/ll_temac_main.c
+++ b/drivers/net/ll_temac_main.c
@@ -964,7 +964,7 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match)
np = of_parse_phandle(op->dev.of_node, "llink-connected", 0);
if (!np) {
dev_err(&op->dev, "could not find DMA node\n");
- goto nodev;
+ goto err_iounmap;
}
/* Setup the DMA register accesses, could be DCR or memory mapped */
@@ -978,7 +978,7 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match)
dev_dbg(&op->dev, "MEM base: %p\n", lp->sdma_regs);
} else {
dev_err(&op->dev, "unable to map DMA registers\n");
- goto nodev;
+ goto err_iounmap;
}
}
@@ -987,7 +987,7 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match)
if ((lp->rx_irq == NO_IRQ) || (lp->tx_irq == NO_IRQ)) {
dev_err(&op->dev, "could not determine irqs\n");
rc = -ENOMEM;
- goto nodev;
+ goto err_iounmap_2;
}
of_node_put(np); /* Finished with the DMA node; drop the reference */
@@ -997,7 +997,7 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match)
if ((!addr) || (size != 6)) {
dev_err(&op->dev, "could not find MAC address\n");
rc = -ENODEV;
- goto nodev;
+ goto err_iounmap_2;
}
temac_set_mac_address(ndev, (void *)addr);
@@ -1013,7 +1013,7 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match)
rc = sysfs_create_group(&lp->dev->kobj, &temac_attr_group);
if (rc) {
dev_err(lp->dev, "Error creating sysfs files\n");
- goto nodev;
+ goto err_iounmap_2;
}
rc = register_netdev(lp->ndev);
@@ -1026,6 +1026,11 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match)
err_register_ndev:
sysfs_remove_group(&lp->dev->kobj, &temac_attr_group);
+ err_iounmap_2:
+ if (lp->sdma_regs)
+ iounmap(lp->sdma_regs);
+ err_iounmap:
+ iounmap(lp->regs);
nodev:
free_netdev(ndev);
ndev = NULL;
@@ -1044,6 +1049,9 @@ static int __devexit temac_of_remove(struct of_device *op)
of_node_put(lp->phy_node);
lp->phy_node = NULL;
dev_set_drvdata(&op->dev, NULL);
+ iounmap(lp->regs);
+ if (lp->sdma_regs)
+ iounmap(lp->sdma_regs);
free_netdev(ndev);
return 0;
}
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index e345ec8cb473..73bb8ea6f54a 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -289,6 +289,7 @@ struct mv643xx_eth_shared_private {
unsigned int t_clk;
int extended_rx_coal_limit;
int tx_bw_control;
+ int tx_csum_limit;
};
#define TX_BW_CONTROL_ABSENT 0
@@ -776,13 +777,16 @@ static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
l4i_chk = 0;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ int hdr_len;
int tag_bytes;
BUG_ON(skb->protocol != htons(ETH_P_IP) &&
skb->protocol != htons(ETH_P_8021Q));
- tag_bytes = (void *)ip_hdr(skb) - (void *)skb->data - ETH_HLEN;
- if (unlikely(tag_bytes & ~12)) {
+ hdr_len = (void *)ip_hdr(skb) - (void *)skb->data;
+ tag_bytes = hdr_len - ETH_HLEN;
+ if (skb->len - hdr_len > mp->shared->tx_csum_limit ||
+ unlikely(tag_bytes & ~12)) {
if (skb_checksum_help(skb) == 0)
goto no_csum;
kfree_skb(skb);
@@ -2666,6 +2670,7 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
* Detect hardware parameters.
*/
msp->t_clk = (pd != NULL && pd->t_clk != 0) ? pd->t_clk : 133000000;
+ msp->tx_csum_limit = pd->tx_csum_limit ? pd->tx_csum_limit : 9 * 1024;
infer_hw_params(msp);
platform_set_drvdata(pdev, msp);
diff --git a/drivers/net/ne.c b/drivers/net/ne.c
index b8e2923a1d69..1063093b3afc 100644
--- a/drivers/net/ne.c
+++ b/drivers/net/ne.c
@@ -806,8 +806,10 @@ static int __init ne_drv_probe(struct platform_device *pdev)
dev->base_addr = res->start;
dev->irq = platform_get_irq(pdev, 0);
} else {
- if (this_dev < 0 || this_dev >= MAX_NE_CARDS)
+ if (this_dev < 0 || this_dev >= MAX_NE_CARDS) {
+ free_netdev(dev);
return -EINVAL;
+ }
dev->base_addr = io[this_dev];
dev->irq = irq[this_dev];
dev->mem_end = bad[this_dev];
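
The ne.c hunk plugs a leak: the early -EINVAL return happened after the net_device had already been allocated, so the fix frees it before bailing out. A generic sketch of that error-path discipline, using hypothetical names rather than the driver's own:

	#include <stdlib.h>
	#include <errno.h>

	struct device_ctx { int id; };

	/* Hypothetical probe-style function: every early exit taken after
	 * the allocation must release it, mirroring the free_netdev() fix. */
	static int probe(int index, int max)
	{
		struct device_ctx *ctx = malloc(sizeof(*ctx));

		if (!ctx)
			return -ENOMEM;

		if (index < 0 || index >= max) {
			free(ctx);	/* without this, the context leaks */
			return -EINVAL;
		}

		ctx->id = index;
		/* ... on success, ownership would move to the caller ... */
		free(ctx);		/* kept simple for the sketch */
		return 0;
	}

	int main(void)
	{
		return probe(5, 4) == -EINVAL ? 0 : 1;
	}
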
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index fa4b24c49f42..d10bcefc0e45 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -4611,8 +4611,7 @@ static void ql_timer(unsigned long data)
return;
}
- qdev->timer.expires = jiffies + (5*HZ);
- add_timer(&qdev->timer);
+ mod_timer(&qdev->timer, jiffies + (5*HZ));
}
static int __devinit qlge_probe(struct pci_dev *pdev,
@@ -4713,6 +4712,8 @@ static void ql_eeh_close(struct net_device *ndev)
netif_stop_queue(ndev);
}
+ /* Disabling the timer */
+ del_timer_sync(&qdev->timer);
if (test_bit(QL_ADAPTER_UP, &qdev->flags))
cancel_delayed_work_sync(&qdev->asic_reset_work);
cancel_delayed_work_sync(&qdev->mpi_reset_work);
@@ -4808,8 +4809,7 @@ static void qlge_io_resume(struct pci_dev *pdev)
netif_err(qdev, ifup, qdev->ndev,
"Device was not running prior to EEH.\n");
}
- qdev->timer.expires = jiffies + (5*HZ);
- add_timer(&qdev->timer);
+ mod_timer(&qdev->timer, jiffies + (5*HZ));
netif_device_attach(ndev);
}
@@ -4871,8 +4871,7 @@ static int qlge_resume(struct pci_dev *pdev)
return err;
}
- qdev->timer.expires = jiffies + (5*HZ);
- add_timer(&qdev->timer);
+ mod_timer(&qdev->timer, jiffies + (5*HZ));
netif_device_attach(ndev);
return 0;
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 668327ccd8d0..1d37f0c310ca 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -3130,7 +3130,6 @@ static void tx_intr_handler(struct fifo_info *fifo_data)
pkt_cnt++;
/* Updating the statistics block */
- nic->dev->stats.tx_bytes += skb->len;
swstats->mem_freed += skb->truesize;
dev_kfree_skb_irq(skb);
@@ -4901,48 +4900,81 @@ static void s2io_updt_stats(struct s2io_nic *sp)
* Return value:
* pointer to the updated net_device_stats structure.
*/
-
static struct net_device_stats *s2io_get_stats(struct net_device *dev)
{
struct s2io_nic *sp = netdev_priv(dev);
- struct config_param *config = &sp->config;
struct mac_info *mac_control = &sp->mac_control;
struct stat_block *stats = mac_control->stats_info;
- int i;
+ u64 delta;
/* Configure Stats for immediate updt */
s2io_updt_stats(sp);
- /* Using sp->stats as a staging area, because reset (due to mtu
- change, for example) will clear some hardware counters */
- dev->stats.tx_packets += le32_to_cpu(stats->tmac_frms) -
- sp->stats.tx_packets;
- sp->stats.tx_packets = le32_to_cpu(stats->tmac_frms);
-
- dev->stats.tx_errors += le32_to_cpu(stats->tmac_any_err_frms) -
- sp->stats.tx_errors;
- sp->stats.tx_errors = le32_to_cpu(stats->tmac_any_err_frms);
-
- dev->stats.rx_errors += le64_to_cpu(stats->rmac_drop_frms) -
- sp->stats.rx_errors;
- sp->stats.rx_errors = le64_to_cpu(stats->rmac_drop_frms);
-
- dev->stats.multicast = le32_to_cpu(stats->rmac_vld_mcst_frms) -
- sp->stats.multicast;
- sp->stats.multicast = le32_to_cpu(stats->rmac_vld_mcst_frms);
-
- dev->stats.rx_length_errors = le64_to_cpu(stats->rmac_long_frms) -
- sp->stats.rx_length_errors;
- sp->stats.rx_length_errors = le64_to_cpu(stats->rmac_long_frms);
+ /* A device reset will cause the on-adapter statistics to be zero'ed.
+ * This can be done while running by changing the MTU. To prevent the
+ * system from having the stats zero'ed, the driver keeps a copy of the
+ * last update to the system (which is also zero'ed on reset). This
+ * enables the driver to accurately know the delta between the last
+ * update and the current update.
+ */
+ delta = ((u64) le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
+ le32_to_cpu(stats->rmac_vld_frms)) - sp->stats.rx_packets;
+ sp->stats.rx_packets += delta;
+ dev->stats.rx_packets += delta;
+
+ delta = ((u64) le32_to_cpu(stats->tmac_frms_oflow) << 32 |
+ le32_to_cpu(stats->tmac_frms)) - sp->stats.tx_packets;
+ sp->stats.tx_packets += delta;
+ dev->stats.tx_packets += delta;
+
+ delta = ((u64) le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
+ le32_to_cpu(stats->rmac_data_octets)) - sp->stats.rx_bytes;
+ sp->stats.rx_bytes += delta;
+ dev->stats.rx_bytes += delta;
+
+ delta = ((u64) le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
+ le32_to_cpu(stats->tmac_data_octets)) - sp->stats.tx_bytes;
+ sp->stats.tx_bytes += delta;
+ dev->stats.tx_bytes += delta;
+
+ delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_errors;
+ sp->stats.rx_errors += delta;
+ dev->stats.rx_errors += delta;
+
+ delta = ((u64) le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
+ le32_to_cpu(stats->tmac_any_err_frms)) - sp->stats.tx_errors;
+ sp->stats.tx_errors += delta;
+ dev->stats.tx_errors += delta;
+
+ delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_dropped;
+ sp->stats.rx_dropped += delta;
+ dev->stats.rx_dropped += delta;
+
+ delta = le64_to_cpu(stats->tmac_drop_frms) - sp->stats.tx_dropped;
+ sp->stats.tx_dropped += delta;
+ dev->stats.tx_dropped += delta;
+
+ /* The adapter MAC interprets pause frames as multicast packets, but
+ * does not pass them up. This erroneously increases the multicast
+ * packet count and needs to be deducted when the multicast frame count
+ * is queried.
+ */
+ delta = (u64) le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
+ le32_to_cpu(stats->rmac_vld_mcst_frms);
+ delta -= le64_to_cpu(stats->rmac_pause_ctrl_frms);
+ delta -= sp->stats.multicast;
+ sp->stats.multicast += delta;
+ dev->stats.multicast += delta;
- /* collect per-ring rx_packets and rx_bytes */
- dev->stats.rx_packets = dev->stats.rx_bytes = 0;
- for (i = 0; i < config->rx_ring_num; i++) {
- struct ring_info *ring = &mac_control->rings[i];
+ delta = ((u64) le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
+ le32_to_cpu(stats->rmac_usized_frms)) +
+ le64_to_cpu(stats->rmac_long_frms) - sp->stats.rx_length_errors;
+ sp->stats.rx_length_errors += delta;
+ dev->stats.rx_length_errors += delta;
- dev->stats.rx_packets += ring->rx_packets;
- dev->stats.rx_bytes += ring->rx_bytes;
- }
+ delta = le64_to_cpu(stats->rmac_fcs_err_frms) - sp->stats.rx_crc_errors;
+ sp->stats.rx_crc_errors += delta;
+ dev->stats.rx_crc_errors += delta;
return &dev->stats;
}
@@ -7455,15 +7487,11 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
}
}
- /* Updating statistics */
- ring_data->rx_packets++;
rxdp->Host_Control = 0;
if (sp->rxd_mode == RXD_MODE_1) {
int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
- ring_data->rx_bytes += len;
skb_put(skb, len);
-
} else if (sp->rxd_mode == RXD_MODE_3B) {
int get_block = ring_data->rx_curr_get_info.block_index;
int get_off = ring_data->rx_curr_get_info.offset;
@@ -7472,7 +7500,6 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
unsigned char *buff = skb_push(skb, buf0_len);
struct buffAdd *ba = &ring_data->ba[get_block][get_off];
- ring_data->rx_bytes += buf0_len + buf2_len;
memcpy(buff, ba->ba_0, buf0_len);
skb_put(skb, buf2_len);
}
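
The s2io_get_stats() rewrite above widens each hardware counter by pairing it with its *_oflow register and then accumulates only the delta since the previous read, so a reset that clears the adapter counters does not wipe the totals reported to the stack. A stand-alone sketch of the combine-and-delta arithmetic, with invented sample readings:

	#include <stdio.h>
	#include <stdint.h>

	/* Combine a 32-bit counter with its 32-bit overflow register, as the
	 * hunk above does for the rmac/tmac statistics. */
	static uint64_t combine(uint32_t overflow, uint32_t low)
	{
		return ((uint64_t)overflow << 32) | low;
	}

	int main(void)
	{
		uint64_t last = 0, total = 0;
		/* Two successive hardware readings (overflow, low) - purely
		 * illustrative values. */
		uint32_t readings[2][2] = { { 0, 0xfffffff0u },
					    { 1, 0x00000010u } };

		for (int i = 0; i < 2; ++i) {
			uint64_t now = combine(readings[i][0], readings[i][1]);
			uint64_t delta = now - last;

			last = now;
			total += delta;
			printf("reading %d: total=%llu\n", i,
			       (unsigned long long)total);
		}
		return 0;
	}
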
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 47c36e0994f5..5e52c75892df 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -745,10 +745,6 @@ struct ring_info {
/* Buffer Address store. */
struct buffAdd **ba;
-
- /* per-Ring statistics */
- unsigned long rx_packets;
- unsigned long rx_bytes;
} ____cacheline_aligned;
/* Fifo specific structure */
diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c
index 1f3acc3a5dfd..79eee3062083 100644
--- a/drivers/net/sb1250-mac.c
+++ b/drivers/net/sb1250-mac.c
@@ -2671,6 +2671,7 @@ static struct platform_driver sbmac_driver = {
.remove = __exit_p(sbmac_remove),
.driver = {
.name = sbmac_string,
+ .owner = THIS_MODULE,
},
};
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index 28d3ee175e7b..dd8a4adf48ca 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -104,10 +104,8 @@ static void rndis_msg_indicate(struct usbnet *dev, struct rndis_indicate *msg,
int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf, int buflen)
{
struct cdc_state *info = (void *) &dev->data;
- struct usb_cdc_notification notification;
int master_ifnum;
int retval;
- int partial;
unsigned count;
__le32 rsp;
u32 xid = 0, msg_len, request_id;
@@ -135,17 +133,13 @@ int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf, int buflen)
if (unlikely(retval < 0 || xid == 0))
return retval;
- /* Some devices don't respond on the control channel until
- * polled on the status channel, so do that first. */
- retval = usb_interrupt_msg(
- dev->udev,
- usb_rcvintpipe(dev->udev, dev->status->desc.bEndpointAddress),
- &notification, sizeof(notification), &partial,
- RNDIS_CONTROL_TIMEOUT_MS);
- if (unlikely(retval < 0))
- return retval;
+ // FIXME Seems like some devices discard responses when
+ // we time out and cancel our "get response" requests...
+ // so, this is fragile. Probably need to poll for status.
- /* Poll the control channel; the request probably completed immediately */
+ /* ignore status endpoint, just poll the control channel;
+ * the request probably completed immediately
+ */
rsp = buf->msg_type | RNDIS_MSG_COMPLETION;
for (count = 0; count < 10; count++) {
memset(buf, 0, CONTROL_BUFFER_SIZE);
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index a95c73de5824..81c76ada8e56 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1293,6 +1293,9 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
goto out;
}
+ /* netdev_printk() needs this so do it as early as possible */
+ SET_NETDEV_DEV(net, &udev->dev);
+
dev = netdev_priv(net);
dev->udev = xdev;
dev->intf = udev;
@@ -1377,8 +1380,6 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
dev->rx_urb_size = dev->hard_mtu;
dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1);
- SET_NETDEV_DEV(net, &udev->dev);
-
if ((dev->driver_info->flags & FLAG_WLAN) != 0)
SET_NETDEV_DEVTYPE(net, &wlan_type);
if ((dev->driver_info->flags & FLAG_WWAN) != 0)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 1edb7a61983c..bb6b67f6b0cc 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -415,7 +415,7 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi, gfp_t gfp)
static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
{
int err;
- bool oom = false;
+ bool oom;
do {
if (vi->mergeable_rx_bufs)
@@ -425,10 +425,9 @@ static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
else
err = add_recvbuf_small(vi, gfp);
- if (err < 0) {
- oom = true;
+ oom = err == -ENOMEM;
+ if (err < 0)
break;
- }
++vi->num;
} while (err > 0);
if (unlikely(vi->num > vi->max))
@@ -563,7 +562,6 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
struct virtnet_info *vi = netdev_priv(dev);
int capacity;
-again:
/* Free up any pending old buffers before queueing new ones. */
free_old_xmit_skbs(vi);
@@ -572,14 +570,20 @@ again:
/* This can happen with OOM and indirect buffers. */
if (unlikely(capacity < 0)) {
- netif_stop_queue(dev);
- dev_warn(&dev->dev, "Unexpected full queue\n");
- if (unlikely(!virtqueue_enable_cb(vi->svq))) {
- virtqueue_disable_cb(vi->svq);
- netif_start_queue(dev);
- goto again;
+ if (net_ratelimit()) {
+ if (likely(capacity == -ENOMEM)) {
+ dev_warn(&dev->dev,
+ "TX queue failure: out of memory\n");
+ } else {
+ dev->stats.tx_fifo_errors++;
+ dev_warn(&dev->dev,
+ "Unexpected TX queue failure: %d\n",
+ capacity);
+ }
}
- return NETDEV_TX_BUSY;
+ dev->stats.tx_dropped++;
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
}
virtqueue_kick(vi->svq);
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index d14e207de1df..fc8b2d7a0919 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -4517,9 +4517,9 @@ vxge_starter(void)
char version[32];
snprintf(version, 32, "%s", DRV_VERSION);
- printk(KERN_CRIT "%s: Copyright(c) 2002-2009 Neterion Inc\n",
+ printk(KERN_INFO "%s: Copyright(c) 2002-2009 Neterion Inc\n",
VXGE_DRIVER_NAME);
- printk(KERN_CRIT "%s: Driver version: %s\n",
+ printk(KERN_INFO "%s: Driver version: %s\n",
VXGE_DRIVER_NAME, version);
verify_bandwidth();
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index fbb7dec6ddeb..5ea87736a6ae 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -445,6 +445,7 @@ void ath_deinit_leds(struct ath_softc *sc);
#define SC_OP_TSF_RESET BIT(11)
#define SC_OP_BT_PRIORITY_DETECTED BIT(12)
#define SC_OP_BT_SCAN BIT(13)
+#define SC_OP_ANI_RUN BIT(14)
/* Powersave flags */
#define PS_WAIT_FOR_BEACON BIT(0)
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index abfa0493236f..1e2a68ea9355 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -336,6 +336,10 @@ set_timer:
static void ath_start_ani(struct ath_common *common)
{
unsigned long timestamp = jiffies_to_msecs(jiffies);
+ struct ath_softc *sc = (struct ath_softc *) common->priv;
+
+ if (!(sc->sc_flags & SC_OP_ANI_RUN))
+ return;
common->ani.longcal_timer = timestamp;
common->ani.shortcal_timer = timestamp;
@@ -872,11 +876,13 @@ static void ath9k_bss_assoc_info(struct ath_softc *sc,
/* Reset rssi stats */
sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER;
+ sc->sc_flags |= SC_OP_ANI_RUN;
ath_start_ani(common);
} else {
ath_print(common, ATH_DBG_CONFIG, "Bss Info DISASSOC\n");
common->curaid = 0;
/* Stop ANI */
+ sc->sc_flags &= ~SC_OP_ANI_RUN;
del_timer_sync(&common->ani.timer);
}
}
@@ -1478,8 +1484,10 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
if (vif->type == NL80211_IFTYPE_AP ||
vif->type == NL80211_IFTYPE_ADHOC ||
- vif->type == NL80211_IFTYPE_MONITOR)
+ vif->type == NL80211_IFTYPE_MONITOR) {
+ sc->sc_flags |= SC_OP_ANI_RUN;
ath_start_ani(common);
+ }
out:
mutex_unlock(&sc->mutex);
@@ -1500,6 +1508,7 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
mutex_lock(&sc->mutex);
/* Stop ANI */
+ sc->sc_flags &= ~SC_OP_ANI_RUN;
del_timer_sync(&common->ani.timer);
/* Reclaim beacon resources */
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
index 44ef5d93befc..01658cf82d39 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
@@ -212,11 +212,7 @@ static void iwlagn_chain_noise_reset(struct iwl_priv *priv)
static void iwlagn_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
__le32 *tx_flags)
{
- if ((info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) ||
- (info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT))
- *tx_flags |= TX_CMD_FLG_RTS_CTS_MSK;
- else
- *tx_flags &= ~TX_CMD_FLG_RTS_CTS_MSK;
+ *tx_flags |= TX_CMD_FLG_RTS_CTS_MSK;
}
/* Calc max signal level (dBm) among 3 possible receivers */
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 426e95567de3..5bbc5298ef96 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -1314,7 +1314,6 @@ void iwl_configure_filter(struct ieee80211_hw *hw,
changed_flags, *total_flags);
CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
- CHK(FIF_ALLMULTI, RXON_FILTER_ACCEPT_GRP_MSK);
CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK);
CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
@@ -1329,6 +1328,12 @@ void iwl_configure_filter(struct ieee80211_hw *hw,
mutex_unlock(&priv->mutex);
+ /*
+ * Receiving all multicast frames is always enabled by the
+ * default flags setup in iwl_connection_init_rx_config()
+ * since we currently do not support programming multicast
+ * filters into the device.
+ */
*total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
}
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
index 9fc339845538..eac961463be2 100644
--- a/drivers/pcmcia/ds.c
+++ b/drivers/pcmcia/ds.c
@@ -1356,6 +1356,7 @@ static int __devinit pcmcia_bus_add_socket(struct device *dev,
INIT_LIST_HEAD(&socket->devices_list);
memset(&socket->pcmcia_state, 0, sizeof(u8));
socket->device_count = 0;
+ atomic_set(&socket->present, 0);
ret = pccard_register_pcmcia(socket, &pcmcia_bus_callback);
if (ret) {
@@ -1364,8 +1365,6 @@ static int __devinit pcmcia_bus_add_socket(struct device *dev,
return ret;
}
- atomic_set(&socket->present, 0);
-
return 0;
}
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index df5b6b971f26..57a593c58cf4 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -98,7 +98,8 @@ static void tx_poll_start(struct vhost_net *net, struct socket *sock)
static void handle_tx(struct vhost_net *net)
{
struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_TX];
- unsigned head, out, in, s;
+ unsigned out, in, s;
+ int head;
struct msghdr msg = {
.msg_name = NULL,
.msg_namelen = 0,
@@ -135,6 +136,9 @@ static void handle_tx(struct vhost_net *net)
ARRAY_SIZE(vq->iov),
&out, &in,
NULL, NULL);
+ /* On error, stop handling until the next kick. */
+ if (unlikely(head < 0))
+ break;
/* Nothing new? Wait for eventfd to tell us they refilled. */
if (head == vq->num) {
wmem = atomic_read(&sock->sk->sk_wmem_alloc);
@@ -192,7 +196,8 @@ static void handle_tx(struct vhost_net *net)
static void handle_rx(struct vhost_net *net)
{
struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_RX];
- unsigned head, out, in, log, s;
+ unsigned out, in, log, s;
+ int head;
struct vhost_log *vq_log;
struct msghdr msg = {
.msg_name = NULL,
@@ -228,6 +233,9 @@ static void handle_rx(struct vhost_net *net)
ARRAY_SIZE(vq->iov),
&out, &in,
vq_log, &log);
+ /* On error, stop handling until the next kick. */
+ if (unlikely(head < 0))
+ break;
/* OK, now we need to know about added descriptors. */
if (head == vq->num) {
if (unlikely(vhost_enable_notify(vq))) {
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 3b83382e06eb..0b99783083f6 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -736,12 +736,12 @@ static int translate_desc(struct vhost_dev *dev, u64 addr, u32 len,
mem = rcu_dereference(dev->memory);
while ((u64)len > s) {
u64 size;
- if (ret >= iov_size) {
+ if (unlikely(ret >= iov_size)) {
ret = -ENOBUFS;
break;
}
reg = find_region(mem, addr, len);
- if (!reg) {
+ if (unlikely(!reg)) {
ret = -EFAULT;
break;
}
@@ -780,18 +780,18 @@ static unsigned next_desc(struct vring_desc *desc)
return next;
}
-static unsigned get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq,
- struct iovec iov[], unsigned int iov_size,
- unsigned int *out_num, unsigned int *in_num,
- struct vhost_log *log, unsigned int *log_num,
- struct vring_desc *indirect)
+static int get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq,
+ struct iovec iov[], unsigned int iov_size,
+ unsigned int *out_num, unsigned int *in_num,
+ struct vhost_log *log, unsigned int *log_num,
+ struct vring_desc *indirect)
{
struct vring_desc desc;
unsigned int i = 0, count, found = 0;
int ret;
/* Sanity check */
- if (indirect->len % sizeof desc) {
+ if (unlikely(indirect->len % sizeof desc)) {
vq_err(vq, "Invalid length in indirect descriptor: "
"len 0x%llx not multiple of 0x%zx\n",
(unsigned long long)indirect->len,
@@ -801,7 +801,7 @@ static unsigned get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq,
ret = translate_desc(dev, indirect->addr, indirect->len, vq->indirect,
ARRAY_SIZE(vq->indirect));
- if (ret < 0) {
+ if (unlikely(ret < 0)) {
vq_err(vq, "Translation failure %d in indirect.\n", ret);
return ret;
}
@@ -813,7 +813,7 @@ static unsigned get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq,
count = indirect->len / sizeof desc;
/* Buffers are chained via a 16 bit next field, so
* we can have at most 2^16 of these. */
- if (count > USHRT_MAX + 1) {
+ if (unlikely(count > USHRT_MAX + 1)) {
vq_err(vq, "Indirect buffer length too big: %d\n",
indirect->len);
return -E2BIG;
@@ -821,19 +821,19 @@ static unsigned get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq,
do {
unsigned iov_count = *in_num + *out_num;
- if (++found > count) {
+ if (unlikely(++found > count)) {
vq_err(vq, "Loop detected: last one at %u "
"indirect size %u\n",
i, count);
return -EINVAL;
}
- if (memcpy_fromiovec((unsigned char *)&desc, vq->indirect,
- sizeof desc)) {
+ if (unlikely(memcpy_fromiovec((unsigned char *)&desc, vq->indirect,
+ sizeof desc))) {
vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
i, (size_t)indirect->addr + i * sizeof desc);
return -EINVAL;
}
- if (desc.flags & VRING_DESC_F_INDIRECT) {
+ if (unlikely(desc.flags & VRING_DESC_F_INDIRECT)) {
vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
i, (size_t)indirect->addr + i * sizeof desc);
return -EINVAL;
@@ -841,7 +841,7 @@ static unsigned get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq,
ret = translate_desc(dev, desc.addr, desc.len, iov + iov_count,
iov_size - iov_count);
- if (ret < 0) {
+ if (unlikely(ret < 0)) {
vq_err(vq, "Translation failure %d indirect idx %d\n",
ret, i);
return ret;
@@ -857,7 +857,7 @@ static unsigned get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq,
} else {
/* If it's an output descriptor, they're all supposed
* to come before any input descriptors. */
- if (*in_num) {
+ if (unlikely(*in_num)) {
vq_err(vq, "Indirect descriptor "
"has out after in: idx %d\n", i);
return -EINVAL;
@@ -873,12 +873,13 @@ static unsigned get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq,
* number of output then some number of input descriptors, it's actually two
* iovecs, but we pack them into one and note how many of each there were.
*
- * This function returns the descriptor number found, or vq->num (which
- * is never a valid descriptor number) if none was found. */
-unsigned vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq,
- struct iovec iov[], unsigned int iov_size,
- unsigned int *out_num, unsigned int *in_num,
- struct vhost_log *log, unsigned int *log_num)
+ * This function returns the descriptor number found, or vq->num (which is
+ * never a valid descriptor number) if none was found. A negative code is
+ * returned on error. */
+int vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq,
+ struct iovec iov[], unsigned int iov_size,
+ unsigned int *out_num, unsigned int *in_num,
+ struct vhost_log *log, unsigned int *log_num)
{
struct vring_desc desc;
unsigned int i, head, found = 0;
@@ -887,16 +888,16 @@ unsigned vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq,
/* Check it isn't doing very strange things with descriptor numbers. */
last_avail_idx = vq->last_avail_idx;
- if (get_user(vq->avail_idx, &vq->avail->idx)) {
+ if (unlikely(get_user(vq->avail_idx, &vq->avail->idx))) {
vq_err(vq, "Failed to access avail idx at %p\n",
&vq->avail->idx);
- return vq->num;
+ return -EFAULT;
}
- if ((u16)(vq->avail_idx - last_avail_idx) > vq->num) {
+ if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) {
vq_err(vq, "Guest moved used index from %u to %u",
last_avail_idx, vq->avail_idx);
- return vq->num;
+ return -EFAULT;
}
/* If there's nothing new since last we looked, return invalid. */
@@ -908,18 +909,19 @@ unsigned vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq,
/* Grab the next descriptor number they're advertising, and increment
* the index we've seen. */
- if (get_user(head, &vq->avail->ring[last_avail_idx % vq->num])) {
+ if (unlikely(get_user(head,
+ &vq->avail->ring[last_avail_idx % vq->num]))) {
vq_err(vq, "Failed to read head: idx %d address %p\n",
last_avail_idx,
&vq->avail->ring[last_avail_idx % vq->num]);
- return vq->num;
+ return -EFAULT;
}
/* If their number is silly, that's an error. */
- if (head >= vq->num) {
+ if (unlikely(head >= vq->num)) {
vq_err(vq, "Guest says index %u > %u is available",
head, vq->num);
- return vq->num;
+ return -EINVAL;
}
/* When we start there are none of either input nor output. */
@@ -930,41 +932,41 @@ unsigned vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq,
i = head;
do {
unsigned iov_count = *in_num + *out_num;
- if (i >= vq->num) {
+ if (unlikely(i >= vq->num)) {
vq_err(vq, "Desc index is %u > %u, head = %u",
i, vq->num, head);
- return vq->num;
+ return -EINVAL;
}
- if (++found > vq->num) {
+ if (unlikely(++found > vq->num)) {
vq_err(vq, "Loop detected: last one at %u "
"vq size %u head %u\n",
i, vq->num, head);
- return vq->num;
+ return -EINVAL;
}
ret = copy_from_user(&desc, vq->desc + i, sizeof desc);
- if (ret) {
+ if (unlikely(ret)) {
vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
i, vq->desc + i);
- return vq->num;
+ return -EFAULT;
}
if (desc.flags & VRING_DESC_F_INDIRECT) {
ret = get_indirect(dev, vq, iov, iov_size,
out_num, in_num,
log, log_num, &desc);
- if (ret < 0) {
+ if (unlikely(ret < 0)) {
vq_err(vq, "Failure detected "
"in indirect descriptor at idx %d\n", i);
- return vq->num;
+ return ret;
}
continue;
}
ret = translate_desc(dev, desc.addr, desc.len, iov + iov_count,
iov_size - iov_count);
- if (ret < 0) {
+ if (unlikely(ret < 0)) {
vq_err(vq, "Translation failure %d descriptor idx %d\n",
ret, i);
- return vq->num;
+ return ret;
}
if (desc.flags & VRING_DESC_F_WRITE) {
/* If this is an input descriptor,
@@ -978,10 +980,10 @@ unsigned vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq,
} else {
/* If it's an output descriptor, they're all supposed
* to come before any input descriptors. */
- if (*in_num) {
+ if (unlikely(*in_num)) {
vq_err(vq, "Descriptor has out after in: "
"idx %d\n", i);
- return vq->num;
+ return -EINVAL;
}
*out_num += ret;
}
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 44591ba9b07a..11ee13dba0f7 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -120,10 +120,10 @@ long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, unsigned long arg);
int vhost_vq_access_ok(struct vhost_virtqueue *vq);
int vhost_log_access_ok(struct vhost_dev *);
-unsigned vhost_get_vq_desc(struct vhost_dev *, struct vhost_virtqueue *,
- struct iovec iov[], unsigned int iov_count,
- unsigned int *out_num, unsigned int *in_num,
- struct vhost_log *log, unsigned int *log_num);
+int vhost_get_vq_desc(struct vhost_dev *, struct vhost_virtqueue *,
+ struct iovec iov[], unsigned int iov_count,
+ unsigned int *out_num, unsigned int *in_num,
+ struct vhost_log *log, unsigned int *log_num);
void vhost_discard_vq_desc(struct vhost_virtqueue *);
int vhost_add_used(struct vhost_virtqueue *, unsigned int head, int len);
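
The vhost change turns vhost_get_vq_desc() into an int so errors come back as negative codes distinct from the "ring empty" value vq->num; with the old unsigned return type, a negative error assigned to an `unsigned head` could never satisfy a `head < 0` check, which is why the net.c hunks also switch head to int. A toy illustration of that signedness pitfall:

	#include <stdio.h>

	/* Stand-in for a descriptor fetch that can fail; -1 is the error. */
	static int get_desc(int fail)
	{
		return fail ? -1 : 3;
	}

	int main(void)
	{
		unsigned uhead = get_desc(1);	/* old style: wraps to a huge value */
		int head = get_desc(1);		/* new style: stays negative */

		/* The first test is always false for an unsigned value (most
		 * compilers warn about it), so the error is silently missed. */
		printf("unsigned head: %s\n", uhead < 0 ? "error caught" : "error missed");
		printf("int head:      %s\n", head < 0 ? "error caught" : "error missed");
		return 0;
	}
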
diff --git a/fs/afs/write.c b/fs/afs/write.c
index 3dab9e9948d0..722743b152d8 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -680,7 +680,6 @@ int afs_writeback_all(struct afs_vnode *vnode)
{
struct address_space *mapping = vnode->vfs_inode.i_mapping;
struct writeback_control wbc = {
- .bdi = mapping->backing_dev_info,
.sync_mode = WB_SYNC_ALL,
.nr_to_write = LONG_MAX,
.range_cyclic = 1,
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index a4080c21ec55..d74e6af9b53a 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2594,7 +2594,6 @@ int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
.sync_io = wbc->sync_mode == WB_SYNC_ALL,
};
struct writeback_control wbc_writepages = {
- .bdi = wbc->bdi,
.sync_mode = wbc->sync_mode,
.older_than_this = NULL,
.nr_to_write = 64,
@@ -2628,7 +2627,6 @@ int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
.sync_io = mode == WB_SYNC_ALL,
};
struct writeback_control wbc_writepages = {
- .bdi = inode->i_mapping->backing_dev_info,
.sync_mode = mode,
.older_than_this = NULL,
.nr_to_write = nr_pages * 2,
diff --git a/fs/ceph/auth_x.c b/fs/ceph/auth_x.c
index 83d4d2785ffe..3fe49042d8ad 100644
--- a/fs/ceph/auth_x.c
+++ b/fs/ceph/auth_x.c
@@ -493,7 +493,7 @@ static int ceph_x_handle_reply(struct ceph_auth_client *ac, int result,
return -EAGAIN;
}
- op = le32_to_cpu(head->op);
+ op = le16_to_cpu(head->op);
result = le32_to_cpu(head->result);
dout("handle_reply op %d result %d\n", op, result);
switch (op) {
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 619b61655ee5..74144d6389f0 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -244,8 +244,14 @@ static struct ceph_cap *get_cap(struct ceph_cap_reservation *ctx)
struct ceph_cap *cap = NULL;
/* temporary, until we do something about cap import/export */
- if (!ctx)
- return kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS);
+ if (!ctx) {
+ cap = kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS);
+ if (cap) {
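+			/* hedged annotation: unreserved allocation, so account it
+			 * in the cap counters here */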
+ caps_use_count++;
+ caps_total_count++;
+ }
+ return cap;
+ }
spin_lock(&caps_list_lock);
dout("get_cap ctx=%p (%d) %d = %d used + %d resv + %d avail\n",
@@ -2886,18 +2892,19 @@ int ceph_encode_inode_release(void **p, struct inode *inode,
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_cap *cap;
struct ceph_mds_request_release *rel = *p;
+ int used, dirty;
int ret = 0;
- int used = 0;
spin_lock(&inode->i_lock);
used = __ceph_caps_used(ci);
+ dirty = __ceph_caps_dirty(ci);
- dout("encode_inode_release %p mds%d used %s drop %s unless %s\n", inode,
- mds, ceph_cap_string(used), ceph_cap_string(drop),
+ dout("encode_inode_release %p mds%d used|dirty %s drop %s unless %s\n",
+ inode, mds, ceph_cap_string(used|dirty), ceph_cap_string(drop),
ceph_cap_string(unless));
- /* only drop unused caps */
- drop &= ~used;
+ /* only drop unused, clean caps */
+ drop &= ~(used | dirty);
cap = __get_cap_for_mds(ci, mds);
if (cap && __cap_is_valid(cap)) {
diff --git a/fs/ceph/crush/mapper.c b/fs/ceph/crush/mapper.c
index 9ba54efb6543..a4eec133258e 100644
--- a/fs/ceph/crush/mapper.c
+++ b/fs/ceph/crush/mapper.c
@@ -238,7 +238,7 @@ static int bucket_straw_choose(struct crush_bucket_straw *bucket,
static int crush_bucket_choose(struct crush_bucket *in, int x, int r)
{
- dprintk("choose %d x=%d r=%d\n", in->id, x, r);
+ dprintk(" crush_bucket_choose %d x=%d r=%d\n", in->id, x, r);
switch (in->alg) {
case CRUSH_BUCKET_UNIFORM:
return bucket_uniform_choose((struct crush_bucket_uniform *)in,
@@ -264,7 +264,7 @@ static int crush_bucket_choose(struct crush_bucket *in, int x, int r)
*/
static int is_out(struct crush_map *map, __u32 *weight, int item, int x)
{
- if (weight[item] >= 0x1000)
+ if (weight[item] >= 0x10000)
return 0;
if (weight[item] == 0)
return 1;
@@ -305,7 +305,9 @@ static int crush_choose(struct crush_map *map,
int itemtype;
int collide, reject;
const int orig_tries = 5; /* attempts before we fall back to search */
- dprintk("choose bucket %d x %d outpos %d\n", bucket->id, x, outpos);
+
+ dprintk("CHOOSE%s bucket %d x %d outpos %d numrep %d\n", recurse_to_leaf ? "_LEAF" : "",
+ bucket->id, x, outpos, numrep);
for (rep = outpos; rep < numrep; rep++) {
/* keep trying until we get a non-out, non-colliding item */
@@ -366,6 +368,7 @@ static int crush_choose(struct crush_map *map,
BUG_ON(item >= 0 ||
(-1-item) >= map->max_buckets);
in = map->buckets[-1-item];
+ retry_bucket = 1;
continue;
}
@@ -377,15 +380,25 @@ static int crush_choose(struct crush_map *map,
}
}
- if (recurse_to_leaf &&
- item < 0 &&
- crush_choose(map, map->buckets[-1-item],
- weight,
- x, outpos+1, 0,
- out2, outpos,
- firstn, 0, NULL) <= outpos) {
- reject = 1;
- } else {
+ reject = 0;
+ if (recurse_to_leaf) {
+ if (item < 0) {
+ if (crush_choose(map,
+ map->buckets[-1-item],
+ weight,
+ x, outpos+1, 0,
+ out2, outpos,
+ firstn, 0,
+ NULL) <= outpos)
+ /* didn't get leaf */
+ reject = 1;
+ } else {
+ /* we already have a leaf! */
+ out2[outpos] = item;
+ }
+ }
+
+ if (!reject) {
/* out? */
if (itemtype == 0)
reject = is_out(map, weight,
@@ -424,12 +437,12 @@ reject:
continue;
}
- dprintk("choose got %d\n", item);
+ dprintk("CHOOSE got %d\n", item);
out[outpos] = item;
outpos++;
}
- dprintk("choose returns %d\n", outpos);
+ dprintk("CHOOSE returns %d\n", outpos);
return outpos;
}
diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
index 3be33fb066cc..f2f5332ddbba 100644
--- a/fs/ceph/debugfs.c
+++ b/fs/ceph/debugfs.c
@@ -261,7 +261,7 @@ static int osdc_show(struct seq_file *s, void *pp)
static int caps_show(struct seq_file *s, void *p)
{
- struct ceph_client *client = p;
+ struct ceph_client *client = s->private;
int total, avail, used, reserved, min;
ceph_reservation_status(client, &total, &avail, &used, &reserved, &min);
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index ab47f46ca282..8f9b9fe8ef9f 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -854,8 +854,8 @@ static struct dentry *splice_dentry(struct dentry *dn, struct inode *in,
d_drop(dn);
realdn = d_materialise_unique(dn, in);
if (IS_ERR(realdn)) {
- pr_err("splice_dentry error %p inode %p ino %llx.%llx\n",
- dn, in, ceph_vinop(in));
+ pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n",
+ PTR_ERR(realdn), dn, in, ceph_vinop(in));
if (prehash)
*prehash = false; /* don't rehash on error */
dn = realdn; /* note realdn contains the error */
@@ -1234,18 +1234,23 @@ retry_lookup:
goto out;
}
dn = splice_dentry(dn, in, NULL);
+ if (IS_ERR(dn))
+ dn = NULL;
}
if (fill_inode(in, &rinfo->dir_in[i], NULL, session,
req->r_request_started, -1,
&req->r_caps_reservation) < 0) {
pr_err("fill_inode badness on %p\n", in);
- dput(dn);
- continue;
+ goto next_item;
}
- update_dentry_lease(dn, rinfo->dir_dlease[i],
- req->r_session, req->r_request_started);
- dput(dn);
+ if (dn)
+ update_dentry_lease(dn, rinfo->dir_dlease[i],
+ req->r_session,
+ req->r_request_started);
+next_item:
+ if (dn)
+ dput(dn);
}
req->r_did_prepopulate = true;
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 1766947fc07a..3ab79f6c4ce8 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -2783,6 +2783,12 @@ void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
drop_leases(mdsc);
ceph_flush_dirty_caps(mdsc);
wait_requests(mdsc);
+
+ /*
+ * wait for reply handlers to drop their request refs and
+ * their inode/dcache refs
+ */
+ ceph_msgr_flush();
}
/*
diff --git a/fs/ceph/messenger.c b/fs/ceph/messenger.c
index 64b8b1f7863d..9ad43a310a41 100644
--- a/fs/ceph/messenger.c
+++ b/fs/ceph/messenger.c
@@ -657,7 +657,7 @@ static void prepare_write_connect(struct ceph_messenger *msgr,
dout("prepare_write_connect %p cseq=%d gseq=%d proto=%d\n", con,
con->connect_seq, global_seq, proto);
- con->out_connect.features = CEPH_FEATURE_SUPPORTED_CLIENT;
+ con->out_connect.features = cpu_to_le64(CEPH_FEATURE_SUPPORTED_CLIENT);
con->out_connect.host_type = cpu_to_le32(CEPH_ENTITY_TYPE_CLIENT);
con->out_connect.connect_seq = cpu_to_le32(con->connect_seq);
con->out_connect.global_seq = cpu_to_le32(global_seq);
@@ -1396,10 +1396,12 @@ static int read_partial_message(struct ceph_connection *con)
if (!con->in_msg) {
dout("got hdr type %d front %d data %d\n", con->in_hdr.type,
con->in_hdr.front_len, con->in_hdr.data_len);
+ skip = 0;
con->in_msg = ceph_alloc_msg(con, &con->in_hdr, &skip);
if (skip) {
/* skip this message */
dout("alloc_msg said skip message\n");
+ BUG_ON(con->in_msg);
con->in_base_pos = -front_len - middle_len - data_len -
sizeof(m->footer);
con->in_tag = CEPH_MSGR_TAG_READY;
diff --git a/fs/ceph/mon_client.c b/fs/ceph/mon_client.c
index 07a539906e67..cc115eafae11 100644
--- a/fs/ceph/mon_client.c
+++ b/fs/ceph/mon_client.c
@@ -725,7 +725,8 @@ static void handle_auth_reply(struct ceph_mon_client *monc,
dout("authenticated, starting session\n");
monc->client->msgr->inst.name.type = CEPH_ENTITY_TYPE_CLIENT;
- monc->client->msgr->inst.name.num = monc->auth->global_id;
+ monc->client->msgr->inst.name.num =
+ cpu_to_le64(monc->auth->global_id);
__send_subscribe(monc);
__resend_generic_request(monc);
diff --git a/fs/ceph/osd_client.c b/fs/ceph/osd_client.c
index d25b4add85b4..92b7251a53f1 100644
--- a/fs/ceph/osd_client.c
+++ b/fs/ceph/osd_client.c
@@ -1344,7 +1344,7 @@ static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
int type = le16_to_cpu(msg->hdr.type);
if (!osd)
- return;
+ goto out;
osdc = osd->o_osdc;
switch (type) {
@@ -1359,6 +1359,7 @@ static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
pr_err("received unknown message type %d %s\n", type,
ceph_msg_type_name(type));
}
+out:
ceph_msg_put(msg);
}
diff --git a/fs/ceph/osdmap.c b/fs/ceph/osdmap.c
index ddc656fb5c05..50ce64ebd330 100644
--- a/fs/ceph/osdmap.c
+++ b/fs/ceph/osdmap.c
@@ -707,6 +707,7 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
newcrush = crush_decode(*p, min(*p+len, end));
if (IS_ERR(newcrush))
return ERR_CAST(newcrush);
+ *p += len;
}
/* new flags? */
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 0609607d3955..d5be1693ac93 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -38,43 +38,18 @@ int nr_pdflush_threads;
/*
* Passed into wb_writeback(), essentially a subset of writeback_control
*/
-struct wb_writeback_args {
+struct wb_writeback_work {
long nr_pages;
struct super_block *sb;
enum writeback_sync_modes sync_mode;
unsigned int for_kupdate:1;
unsigned int range_cyclic:1;
unsigned int for_background:1;
-};
-/*
- * Work items for the bdi_writeback threads
- */
-struct bdi_work {
struct list_head list; /* pending work list */
- struct rcu_head rcu_head; /* for RCU free/clear of work */
-
- unsigned long seen; /* threads that have seen this work */
- atomic_t pending; /* number of threads still to do work */
-
- struct wb_writeback_args args; /* writeback arguments */
-
- unsigned long state; /* flag bits, see WS_* */
+ struct completion *done; /* set if the caller waits */
};
-enum {
- WS_INPROGRESS = 0,
- WS_ONSTACK,
-};
-
-static inline void bdi_work_init(struct bdi_work *work,
- struct wb_writeback_args *args)
-{
- INIT_RCU_HEAD(&work->rcu_head);
- work->args = *args;
- __set_bit(WS_INPROGRESS, &work->state);
-}
-
/**
* writeback_in_progress - determine whether there is writeback in progress
* @bdi: the device's backing_dev_info structure.
@@ -87,49 +62,11 @@ int writeback_in_progress(struct backing_dev_info *bdi)
return !list_empty(&bdi->work_list);
}
-static void bdi_work_free(struct rcu_head *head)
-{
- struct bdi_work *work = container_of(head, struct bdi_work, rcu_head);
-
- clear_bit(WS_INPROGRESS, &work->state);
- smp_mb__after_clear_bit();
- wake_up_bit(&work->state, WS_INPROGRESS);
-
- if (!test_bit(WS_ONSTACK, &work->state))
- kfree(work);
-}
-
-static void wb_clear_pending(struct bdi_writeback *wb, struct bdi_work *work)
+static void bdi_queue_work(struct backing_dev_info *bdi,
+ struct wb_writeback_work *work)
{
- /*
- * The caller has retrieved the work arguments from this work,
- * drop our reference. If this is the last ref, delete and free it
- */
- if (atomic_dec_and_test(&work->pending)) {
- struct backing_dev_info *bdi = wb->bdi;
-
- spin_lock(&bdi->wb_lock);
- list_del_rcu(&work->list);
- spin_unlock(&bdi->wb_lock);
-
- call_rcu(&work->rcu_head, bdi_work_free);
- }
-}
-
-static void bdi_queue_work(struct backing_dev_info *bdi, struct bdi_work *work)
-{
- work->seen = bdi->wb_mask;
- BUG_ON(!work->seen);
- atomic_set(&work->pending, bdi->wb_cnt);
- BUG_ON(!bdi->wb_cnt);
-
- /*
- * list_add_tail_rcu() contains the necessary barriers to
- * make sure the above stores are seen before the item is
- * noticed on the list
- */
spin_lock(&bdi->wb_lock);
- list_add_tail_rcu(&work->list, &bdi->work_list);
+ list_add_tail(&work->list, &bdi->work_list);
spin_unlock(&bdi->wb_lock);
/*
@@ -146,55 +83,29 @@ static void bdi_queue_work(struct backing_dev_info *bdi, struct bdi_work *work)
}
}
-/*
- * Used for on-stack allocated work items. The caller needs to wait until
- * the wb threads have acked the work before it's safe to continue.
- */
-static void bdi_wait_on_work_done(struct bdi_work *work)
+static void
+__bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
+ bool range_cyclic, bool for_background)
{
- wait_on_bit(&work->state, WS_INPROGRESS, bdi_sched_wait,
- TASK_UNINTERRUPTIBLE);
-}
-
-static void bdi_alloc_queue_work(struct backing_dev_info *bdi,
- struct wb_writeback_args *args)
-{
- struct bdi_work *work;
+ struct wb_writeback_work *work;
/*
* This is WB_SYNC_NONE writeback, so if allocation fails just
* wakeup the thread for old dirty data writeback
*/
- work = kmalloc(sizeof(*work), GFP_ATOMIC);
- if (work) {
- bdi_work_init(work, args);
- bdi_queue_work(bdi, work);
- } else {
- struct bdi_writeback *wb = &bdi->wb;
-
- if (wb->task)
- wake_up_process(wb->task);
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work) {
+ if (bdi->wb.task)
+ wake_up_process(bdi->wb.task);
+ return;
}
-}
-/**
- * bdi_queue_work_onstack - start and wait for writeback
- * @sb: write inodes from this super_block
- *
- * Description:
- * This function initiates writeback and waits for the operation to
- * complete. Callers must hold the sb s_umount semaphore for
- * reading, to avoid having the super disappear before we are done.
- */
-static void bdi_queue_work_onstack(struct wb_writeback_args *args)
-{
- struct bdi_work work;
-
- bdi_work_init(&work, args);
- __set_bit(WS_ONSTACK, &work.state);
+ work->sync_mode = WB_SYNC_NONE;
+ work->nr_pages = nr_pages;
+ work->range_cyclic = range_cyclic;
+ work->for_background = for_background;
- bdi_queue_work(args->sb->s_bdi, &work);
- bdi_wait_on_work_done(&work);
+ bdi_queue_work(bdi, work);
}
/**
@@ -210,13 +121,7 @@ static void bdi_queue_work_onstack(struct wb_writeback_args *args)
*/
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages)
{
- struct wb_writeback_args args = {
- .sync_mode = WB_SYNC_NONE,
- .nr_pages = nr_pages,
- .range_cyclic = 1,
- };
-
- bdi_alloc_queue_work(bdi, &args);
+ __bdi_start_writeback(bdi, nr_pages, true, false);
}
/**
@@ -230,13 +135,7 @@ void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages)
*/
void bdi_start_background_writeback(struct backing_dev_info *bdi)
{
- struct wb_writeback_args args = {
- .sync_mode = WB_SYNC_NONE,
- .nr_pages = LONG_MAX,
- .for_background = 1,
- .range_cyclic = 1,
- };
- bdi_alloc_queue_work(bdi, &args);
+ __bdi_start_writeback(bdi, LONG_MAX, true, true);
}
/*
@@ -554,29 +453,41 @@ static bool pin_sb_for_writeback(struct super_block *sb)
/*
* Write a portion of b_io inodes which belong to @sb.
- * If @wbc->sb != NULL, then find and write all such
+ *
+ * If @only_this_sb is true, then find and write all such
* inodes. Otherwise write only ones which go sequentially
* in reverse order.
+ *
* Return 1, if the caller writeback routine should be
* interrupted. Otherwise return 0.
*/
-static int writeback_sb_inodes(struct super_block *sb,
- struct bdi_writeback *wb,
- struct writeback_control *wbc)
+static int writeback_sb_inodes(struct super_block *sb, struct bdi_writeback *wb,
+ struct writeback_control *wbc, bool only_this_sb)
{
while (!list_empty(&wb->b_io)) {
long pages_skipped;
struct inode *inode = list_entry(wb->b_io.prev,
struct inode, i_list);
- if (wbc->sb && sb != inode->i_sb) {
- /* super block given and doesn't
- match, skip this inode */
- redirty_tail(inode);
- continue;
- }
- if (sb != inode->i_sb)
- /* finish with this superblock */
+
+ if (inode->i_sb != sb) {
+ if (only_this_sb) {
+ /*
+ * We only want to write back data for this
+ * superblock, move all inodes not belonging
+ * to it back onto the dirty list.
+ */
+ redirty_tail(inode);
+ continue;
+ }
+
+ /*
+ * The inode belongs to a different superblock.
+ * Bounce back to the caller to unpin this and
+ * pin the next superblock.
+ */
return 0;
+ }
+
if (inode->i_state & (I_NEW | I_WILL_FREE)) {
requeue_io(inode);
continue;
@@ -614,8 +525,8 @@ static int writeback_sb_inodes(struct super_block *sb,
return 1;
}
-static void writeback_inodes_wb(struct bdi_writeback *wb,
- struct writeback_control *wbc)
+void writeback_inodes_wb(struct bdi_writeback *wb,
+ struct writeback_control *wbc)
{
int ret = 0;
@@ -629,29 +540,12 @@ static void writeback_inodes_wb(struct bdi_writeback *wb,
struct inode, i_list);
struct super_block *sb = inode->i_sb;
- if (wbc->sb) {
- /*
- * We are requested to write out inodes for a specific
- * superblock. This means we already have s_umount
- * taken by the caller which also waits for us to
- * complete the writeout.
- */
- if (sb != wbc->sb) {
- redirty_tail(inode);
- continue;
- }
-
- WARN_ON(!rwsem_is_locked(&sb->s_umount));
-
- ret = writeback_sb_inodes(sb, wb, wbc);
- } else {
- if (!pin_sb_for_writeback(sb)) {
- requeue_io(inode);
- continue;
- }
- ret = writeback_sb_inodes(sb, wb, wbc);
- drop_super(sb);
+ if (!pin_sb_for_writeback(sb)) {
+ requeue_io(inode);
+ continue;
}
+ ret = writeback_sb_inodes(sb, wb, wbc, false);
+ drop_super(sb);
if (ret)
break;
@@ -660,11 +554,17 @@ static void writeback_inodes_wb(struct bdi_writeback *wb,
/* Leave any unwritten inodes on b_io */
}
-void writeback_inodes_wbc(struct writeback_control *wbc)
+static void __writeback_inodes_sb(struct super_block *sb,
+ struct bdi_writeback *wb, struct writeback_control *wbc)
{
- struct backing_dev_info *bdi = wbc->bdi;
+ WARN_ON(!rwsem_is_locked(&sb->s_umount));
- writeback_inodes_wb(&bdi->wb, wbc);
+ wbc->wb_start = jiffies; /* livelock avoidance */
+ spin_lock(&inode_lock);
+ if (!wbc->for_kupdate || list_empty(&wb->b_io))
+ queue_io(wb, wbc->older_than_this);
+ writeback_sb_inodes(sb, wb, wbc, true);
+ spin_unlock(&inode_lock);
}
/*
@@ -702,16 +602,14 @@ static inline bool over_bground_thresh(void)
* all dirty pages if they are all attached to "old" mappings.
*/
static long wb_writeback(struct bdi_writeback *wb,
- struct wb_writeback_args *args)
+ struct wb_writeback_work *work)
{
struct writeback_control wbc = {
- .bdi = wb->bdi,
- .sb = args->sb,
- .sync_mode = args->sync_mode,
+ .sync_mode = work->sync_mode,
.older_than_this = NULL,
- .for_kupdate = args->for_kupdate,
- .for_background = args->for_background,
- .range_cyclic = args->range_cyclic,
+ .for_kupdate = work->for_kupdate,
+ .for_background = work->for_background,
+ .range_cyclic = work->range_cyclic,
};
unsigned long oldest_jif;
long wrote = 0;
@@ -731,21 +629,24 @@ static long wb_writeback(struct bdi_writeback *wb,
/*
* Stop writeback when nr_pages has been consumed
*/
- if (args->nr_pages <= 0)
+ if (work->nr_pages <= 0)
break;
/*
* For background writeout, stop when we are below the
* background dirty threshold
*/
- if (args->for_background && !over_bground_thresh())
+ if (work->for_background && !over_bground_thresh())
break;
wbc.more_io = 0;
wbc.nr_to_write = MAX_WRITEBACK_PAGES;
wbc.pages_skipped = 0;
- writeback_inodes_wb(wb, &wbc);
- args->nr_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
+ if (work->sb)
+ __writeback_inodes_sb(work->sb, wb, &wbc);
+ else
+ writeback_inodes_wb(wb, &wbc);
+ work->nr_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
wrote += MAX_WRITEBACK_PAGES - wbc.nr_to_write;
/*
@@ -781,31 +682,21 @@ static long wb_writeback(struct bdi_writeback *wb,
}
/*
- * Return the next bdi_work struct that hasn't been processed by this
- * wb thread yet. ->seen is initially set for each thread that exists
- * for this device, when a thread first notices a piece of work it
- * clears its bit. Depending on writeback type, the thread will notify
- * completion on either receiving the work (WB_SYNC_NONE) or after
- * it is done (WB_SYNC_ALL).
+ * Return the next wb_writeback_work struct that hasn't been processed yet.
*/
-static struct bdi_work *get_next_work_item(struct backing_dev_info *bdi,
- struct bdi_writeback *wb)
+static struct wb_writeback_work *
+get_next_work_item(struct backing_dev_info *bdi, struct bdi_writeback *wb)
{
- struct bdi_work *work, *ret = NULL;
-
- rcu_read_lock();
+ struct wb_writeback_work *work = NULL;
- list_for_each_entry_rcu(work, &bdi->work_list, list) {
- if (!test_bit(wb->nr, &work->seen))
- continue;
- clear_bit(wb->nr, &work->seen);
-
- ret = work;
- break;
+ spin_lock(&bdi->wb_lock);
+ if (!list_empty(&bdi->work_list)) {
+ work = list_entry(bdi->work_list.next,
+ struct wb_writeback_work, list);
+ list_del_init(&work->list);
}
-
- rcu_read_unlock();
- return ret;
+ spin_unlock(&bdi->wb_lock);
+ return work;
}
static long wb_check_old_data_flush(struct bdi_writeback *wb)
@@ -830,14 +721,14 @@ static long wb_check_old_data_flush(struct bdi_writeback *wb)
(inodes_stat.nr_inodes - inodes_stat.nr_unused);
if (nr_pages) {
- struct wb_writeback_args args = {
+ struct wb_writeback_work work = {
.nr_pages = nr_pages,
.sync_mode = WB_SYNC_NONE,
.for_kupdate = 1,
.range_cyclic = 1,
};
- return wb_writeback(wb, &args);
+ return wb_writeback(wb, &work);
}
return 0;
@@ -849,33 +740,27 @@ static long wb_check_old_data_flush(struct bdi_writeback *wb)
long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
{
struct backing_dev_info *bdi = wb->bdi;
- struct bdi_work *work;
+ struct wb_writeback_work *work;
long wrote = 0;
while ((work = get_next_work_item(bdi, wb)) != NULL) {
- struct wb_writeback_args args = work->args;
-
/*
* Override sync mode, in case we must wait for completion
+ * because this thread is exiting now.
*/
if (force_wait)
- work->args.sync_mode = args.sync_mode = WB_SYNC_ALL;
-
- /*
- * If this isn't a data integrity operation, just notify
- * that we have seen this work and we are now starting it.
- */
- if (!test_bit(WS_ONSTACK, &work->state))
- wb_clear_pending(wb, work);
+ work->sync_mode = WB_SYNC_ALL;
- wrote += wb_writeback(wb, &args);
+ wrote += wb_writeback(wb, work);
/*
- * This is a data integrity writeback, so only do the
- * notification when we have completed the work.
+ * Notify the caller of completion if this is a synchronous
+ * work item, otherwise just free it.
*/
- if (test_bit(WS_ONSTACK, &work->state))
- wb_clear_pending(wb, work);
+ if (work->done)
+ complete(work->done);
+ else
+ kfree(work);
}
/*
@@ -938,14 +823,9 @@ int bdi_writeback_task(struct bdi_writeback *wb)
void wakeup_flusher_threads(long nr_pages)
{
struct backing_dev_info *bdi;
- struct wb_writeback_args args = {
- .sync_mode = WB_SYNC_NONE,
- };
- if (nr_pages) {
- args.nr_pages = nr_pages;
- } else {
- args.nr_pages = global_page_state(NR_FILE_DIRTY) +
+ if (!nr_pages) {
+ nr_pages = global_page_state(NR_FILE_DIRTY) +
global_page_state(NR_UNSTABLE_NFS);
}
@@ -953,7 +833,7 @@ void wakeup_flusher_threads(long nr_pages)
list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
if (!bdi_has_dirty_io(bdi))
continue;
- bdi_alloc_queue_work(bdi, &args);
+ __bdi_start_writeback(bdi, nr_pages, false, false);
}
rcu_read_unlock();
}
@@ -1162,17 +1042,20 @@ void writeback_inodes_sb(struct super_block *sb)
{
unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);
- struct wb_writeback_args args = {
+ DECLARE_COMPLETION_ONSTACK(done);
+ struct wb_writeback_work work = {
.sb = sb,
.sync_mode = WB_SYNC_NONE,
+ .done = &done,
};
WARN_ON(!rwsem_is_locked(&sb->s_umount));
- args.nr_pages = nr_dirty + nr_unstable +
+ work.nr_pages = nr_dirty + nr_unstable +
(inodes_stat.nr_inodes - inodes_stat.nr_unused);
- bdi_queue_work_onstack(&args);
+ bdi_queue_work(sb->s_bdi, &work);
+ wait_for_completion(&done);
}
EXPORT_SYMBOL(writeback_inodes_sb);
@@ -1204,16 +1087,20 @@ EXPORT_SYMBOL(writeback_inodes_sb_if_idle);
*/
void sync_inodes_sb(struct super_block *sb)
{
- struct wb_writeback_args args = {
+ DECLARE_COMPLETION_ONSTACK(done);
+ struct wb_writeback_work work = {
.sb = sb,
.sync_mode = WB_SYNC_ALL,
.nr_pages = LONG_MAX,
.range_cyclic = 0,
+ .done = &done,
};
WARN_ON(!rwsem_is_locked(&sb->s_umount));
- bdi_queue_work_onstack(&args);
+ bdi_queue_work(sb->s_bdi, &work);
+ wait_for_completion(&done);
+
wait_sb_inodes(sb);
}
EXPORT_SYMBOL(sync_inodes_sb);
diff --git a/fs/splice.c b/fs/splice.c
index 740e6b9faf7a..efdbfece9932 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -1282,7 +1282,8 @@ static int direct_splice_actor(struct pipe_inode_info *pipe,
{
struct file *file = sd->u.file;
- return do_splice_from(pipe, file, &sd->pos, sd->total_len, sd->flags);
+ return do_splice_from(pipe, file, &file->f_pos, sd->total_len,
+ sd->flags);
}
/**
@@ -1371,8 +1372,7 @@ static long do_splice(struct file *in, loff_t __user *off_in,
if (off_in)
return -ESPIPE;
if (off_out) {
- if (!out->f_op || !out->f_op->llseek ||
- out->f_op->llseek == no_llseek)
+ if (!(out->f_mode & FMODE_PWRITE))
return -EINVAL;
if (copy_from_user(&offset, off_out, sizeof(loff_t)))
return -EFAULT;
@@ -1392,8 +1392,7 @@ static long do_splice(struct file *in, loff_t __user *off_in,
if (off_out)
return -ESPIPE;
if (off_in) {
- if (!in->f_op || !in->f_op->llseek ||
- in->f_op->llseek == no_llseek)
+ if (!(in->f_mode & FMODE_PREAD))
return -EINVAL;
if (copy_from_user(&offset, off_in, sizeof(loff_t)))
return -EFAULT;
diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
index 8bb4de567b2c..116821448c38 100644
--- a/include/drm/ttm/ttm_page_alloc.h
+++ b/include/drm/ttm/ttm_page_alloc.h
@@ -56,10 +56,6 @@ void ttm_put_pages(struct list_head *pages,
enum ttm_caching_state cstate);
/**
* Initialize pool allocator.
- *
- * Pool allocator is internaly reference counted so it can be initialized
- * multiple times but ttm_page_alloc_fini has to be called same number of
- * times.
*/
int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages);
/**
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 9ae2889096b6..e9aec0d099df 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -82,8 +82,6 @@ struct backing_dev_info {
struct bdi_writeback wb; /* default writeback info for this bdi */
spinlock_t wb_lock; /* protects update side of wb_list */
struct list_head wb_list; /* the flusher threads hanging off this bdi */
- unsigned long wb_mask; /* bitmask of registered tasks */
- unsigned int wb_cnt; /* number of registered tasks */
struct list_head work_list;
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index 276b40a16835..b4207ca3ad52 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -379,6 +379,8 @@ struct ethtool_rxnfc {
__u32 flow_type;
/* The rx flow hash value or the rule DB size */
__u64 data;
+ /* The following fields are not valid and must not be used for
+	 * the ETHTOOL_{G,S}RXFH commands. */
struct ethtool_rx_flow_spec fs;
__u32 rule_cnt;
__u32 rule_locs[0];
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 471e1ff5079a..68ca1b0491af 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1783,6 +1783,19 @@ extern int get_sb_pseudo(struct file_system_type *, char *,
struct vfsmount *mnt);
extern void simple_set_mnt(struct vfsmount *mnt, struct super_block *sb);
+static inline void sb_mark_dirty(struct super_block *sb)
+{
+ sb->s_dirt = 1;
+}
+static inline void sb_mark_clean(struct super_block *sb)
+{
+ sb->s_dirt = 0;
+}
+static inline int sb_is_dirty(struct super_block *sb)
+{
+ return sb->s_dirt;
+}
+
/* Alas, no aliases. Too much hassle with bringing module.h everywhere */
#define fops_get(fops) \
(((fops) && try_module_get((fops)->owner) ? (fops) : NULL))
diff --git a/include/linux/mv643xx_eth.h b/include/linux/mv643xx_eth.h
index cbbbe9bfecad..30b0c4e78f91 100644
--- a/include/linux/mv643xx_eth.h
+++ b/include/linux/mv643xx_eth.h
@@ -19,6 +19,11 @@ struct mv643xx_eth_shared_platform_data {
struct mbus_dram_target_info *dram;
struct platform_device *shared_smi;
unsigned int t_clk;
+ /*
+	 * Max packet size for Tx IP/Layer 4 checksum; when set to 0, the default
+ * limit of 9KiB will be used.
+ */
+ int tx_csum_limit;
};
#define MV643XX_ETH_PHY_ADDR_DEFAULT 0
diff --git a/include/linux/net.h b/include/linux/net.h
index 2b4deeeb8646..dee0b11a8759 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -129,10 +129,9 @@ struct socket_wq {
* @type: socket type (%SOCK_STREAM, etc)
* @flags: socket flags (%SOCK_ASYNC_NOSPACE, etc)
* @ops: protocol specific socket operations
- * @fasync_list: Asynchronous wake up list
* @file: File back pointer for gc
* @sk: internal networking protocol agnostic socket representation
- * @wait: wait queue for several uses
+ * @wq: wait queue for several uses
*/
struct socket {
socket_state state;
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 40291f375024..b21e4054c12c 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1656,6 +1656,9 @@ static inline int netif_is_multiqueue(const struct net_device *dev)
return (dev->num_tx_queues > 1);
}
+extern void netif_set_real_num_tx_queues(struct net_device *dev,
+ unsigned int txq);
+
/* Use this variant when it is known for sure that it
* is executing from hardware interrupt context or with hardware interrupts
* disabled.
@@ -2329,7 +2332,7 @@ do { \
#endif
#if defined(VERBOSE_DEBUG)
-#define netif_vdbg netdev_dbg
+#define netif_vdbg netif_dbg
#else
#define netif_vdbg(priv, type, dev, format, args...) \
({ \
diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h
index fe1872e5b37e..7066acb2c530 100644
--- a/include/linux/rbtree.h
+++ b/include/linux/rbtree.h
@@ -110,7 +110,6 @@ struct rb_node
struct rb_root
{
struct rb_node *rb_node;
- void (*augment_cb)(struct rb_node *node);
};
@@ -130,9 +129,7 @@ static inline void rb_set_color(struct rb_node *rb, int color)
rb->rb_parent_color = (rb->rb_parent_color & ~1) | color;
}
-#define RB_ROOT (struct rb_root) { NULL, NULL, }
-#define RB_AUGMENT_ROOT(x) (struct rb_root) { NULL, x}
-
+#define RB_ROOT (struct rb_root) { NULL, }
#define rb_entry(ptr, type, member) container_of(ptr, type, member)
#define RB_EMPTY_ROOT(root) ((root)->rb_node == NULL)
@@ -142,6 +139,14 @@ static inline void rb_set_color(struct rb_node *rb, int color)
extern void rb_insert_color(struct rb_node *, struct rb_root *);
extern void rb_erase(struct rb_node *, struct rb_root *);
+typedef void (*rb_augment_f)(struct rb_node *node, void *data);
+
+extern void rb_augment_insert(struct rb_node *node,
+ rb_augment_f func, void *data);
+extern struct rb_node *rb_augment_erase_begin(struct rb_node *node);
+extern void rb_augment_erase_end(struct rb_node *node,
+ rb_augment_f func, void *data);
+
/* Find logical next and previous nodes in a tree */
extern struct rb_node *rb_next(const struct rb_node *);
extern struct rb_node *rb_prev(const struct rb_node *);
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index d63ef8f9609f..c24eca71e80c 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -27,10 +27,6 @@ enum writeback_sync_modes {
* in a manner such that unspecified fields are set to zero.
*/
struct writeback_control {
- struct backing_dev_info *bdi; /* If !NULL, only write back this
- queue */
- struct super_block *sb; /* if !NULL, only write inodes from
- this super_block */
enum writeback_sync_modes sync_mode;
unsigned long *older_than_this; /* If !NULL, only write back inodes
older than this */
@@ -66,7 +62,8 @@ int inode_wait(void *);
void writeback_inodes_sb(struct super_block *);
int writeback_inodes_sb_if_idle(struct super_block *);
void sync_inodes_sb(struct super_block *);
-void writeback_inodes_wbc(struct writeback_control *wbc);
+void writeback_inodes_wb(struct bdi_writeback *wb,
+ struct writeback_control *wbc);
long wb_do_writeback(struct bdi_writeback *wb, int force_wait);
void wakeup_flusher_threads(long nr_pages);
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 03ca5d826757..433604bb3fe8 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -313,12 +313,24 @@ extern void qdisc_calculate_pkt_len(struct sk_buff *skb,
extern void tcf_destroy(struct tcf_proto *tp);
extern void tcf_destroy_chain(struct tcf_proto **fl);
-/* Reset all TX qdiscs of a device. */
+/* Reset all TX qdiscs of a device at or above a given index. */
+static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
+{
+ struct Qdisc *qdisc;
+
+ for (; i < dev->num_tx_queues; i++) {
+ qdisc = netdev_get_tx_queue(dev, i)->qdisc;
+ if (qdisc) {
+ spin_lock_bh(qdisc_lock(qdisc));
+ qdisc_reset(qdisc);
+ spin_unlock_bh(qdisc_lock(qdisc));
+ }
+ }
+}
+
static inline void qdisc_reset_all_tx(struct net_device *dev)
{
- unsigned int i;
- for (i = 0; i < dev->num_tx_queues; i++)
- qdisc_reset(netdev_get_tx_queue(dev, i)->qdisc);
+ qdisc_reset_all_tx_gt(dev, 0);
}
/* Are all TX queues of the device empty? */
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 1913af67c43d..fc8f36dd0f5c 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -1586,7 +1586,7 @@ static inline struct xfrm_state *xfrm_input_state(struct sk_buff *skb)
static inline int xfrm_mark_get(struct nlattr **attrs, struct xfrm_mark *m)
{
if (attrs[XFRMA_MARK])
- memcpy(m, nla_data(attrs[XFRMA_MARK]), sizeof(m));
+ memcpy(m, nla_data(attrs[XFRMA_MARK]), sizeof(struct xfrm_mark));
else
m->v = m->m = 0;
diff --git a/lib/rbtree.c b/lib/rbtree.c
index 15e10b1afdd2..4693f79195d3 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -44,11 +44,6 @@ static void __rb_rotate_left(struct rb_node *node, struct rb_root *root)
else
root->rb_node = right;
rb_set_parent(node, right);
-
- if (root->augment_cb) {
- root->augment_cb(node);
- root->augment_cb(right);
- }
}
static void __rb_rotate_right(struct rb_node *node, struct rb_root *root)
@@ -72,20 +67,12 @@ static void __rb_rotate_right(struct rb_node *node, struct rb_root *root)
else
root->rb_node = left;
rb_set_parent(node, left);
-
- if (root->augment_cb) {
- root->augment_cb(node);
- root->augment_cb(left);
- }
}
void rb_insert_color(struct rb_node *node, struct rb_root *root)
{
struct rb_node *parent, *gparent;
- if (root->augment_cb)
- root->augment_cb(node);
-
while ((parent = rb_parent(node)) && rb_is_red(parent))
{
gparent = rb_parent(parent);
@@ -240,15 +227,12 @@ void rb_erase(struct rb_node *node, struct rb_root *root)
else
{
struct rb_node *old = node, *left;
- int old_parent_cb = 0;
- int successor_parent_cb = 0;
node = node->rb_right;
while ((left = node->rb_left) != NULL)
node = left;
if (rb_parent(old)) {
- old_parent_cb = 1;
if (rb_parent(old)->rb_left == old)
rb_parent(old)->rb_left = node;
else
@@ -263,10 +247,8 @@ void rb_erase(struct rb_node *node, struct rb_root *root)
if (parent == old) {
parent = node;
} else {
- successor_parent_cb = 1;
if (child)
rb_set_parent(child, parent);
-
parent->rb_left = child;
node->rb_right = old->rb_right;
@@ -277,24 +259,6 @@ void rb_erase(struct rb_node *node, struct rb_root *root)
node->rb_left = old->rb_left;
rb_set_parent(old->rb_left, node);
- if (root->augment_cb) {
- /*
- * Here, three different nodes can have new children.
- * The parent of the successor node that was selected
- * to replace the node to be erased.
- * The node that is getting erased and is now replaced
- * by its successor.
- * The parent of the node getting erased-replaced.
- */
- if (successor_parent_cb)
- root->augment_cb(parent);
-
- root->augment_cb(node);
-
- if (old_parent_cb)
- root->augment_cb(rb_parent(old));
- }
-
goto color;
}
@@ -303,19 +267,15 @@ void rb_erase(struct rb_node *node, struct rb_root *root)
if (child)
rb_set_parent(child, parent);
-
- if (parent) {
+ if (parent)
+ {
if (parent->rb_left == node)
parent->rb_left = child;
else
parent->rb_right = child;
-
- if (root->augment_cb)
- root->augment_cb(parent);
-
- } else {
- root->rb_node = child;
}
+ else
+ root->rb_node = child;
color:
if (color == RB_BLACK)
@@ -323,6 +283,74 @@ void rb_erase(struct rb_node *node, struct rb_root *root)
}
EXPORT_SYMBOL(rb_erase);
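+/*
+ * call @func on every node from @node up to the root, and on the sibling
+ * of each node visited along the way
+ */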
+static void rb_augment_path(struct rb_node *node, rb_augment_f func, void *data)
+{
+ struct rb_node *parent;
+
+up:
+ func(node, data);
+ parent = rb_parent(node);
+ if (!parent)
+ return;
+
+ if (node == parent->rb_left && parent->rb_right)
+ func(parent->rb_right, data);
+ else if (parent->rb_left)
+ func(parent->rb_left, data);
+
+ node = parent;
+ goto up;
+}
+
+/*
+ * after inserting @node into the tree, update the tree to account for
+ * both the new entry and any damage done by rebalance
+ */
+void rb_augment_insert(struct rb_node *node, rb_augment_f func, void *data)
+{
+ if (node->rb_left)
+ node = node->rb_left;
+ else if (node->rb_right)
+ node = node->rb_right;
+
+ rb_augment_path(node, func, data);
+}
+
+/*
+ * before removing the node, find the deepest node on the rebalance path
+ * that will still be there after @node gets removed
+ */
+struct rb_node *rb_augment_erase_begin(struct rb_node *node)
+{
+ struct rb_node *deepest;
+
+ if (!node->rb_right && !node->rb_left)
+ deepest = rb_parent(node);
+ else if (!node->rb_right)
+ deepest = node->rb_left;
+ else if (!node->rb_left)
+ deepest = node->rb_right;
+ else {
+ deepest = rb_next(node);
+ if (deepest->rb_right)
+ deepest = deepest->rb_right;
+ else if (rb_parent(deepest) != node)
+ deepest = rb_parent(deepest);
+ }
+
+ return deepest;
+}
+
+/*
+ * after removal, update the tree to account for the removed entry
+ * and any rebalance damage.
+ */
+void rb_augment_erase_end(struct rb_node *node, rb_augment_f func, void *data)
+{
+ if (node)
+ rb_augment_path(node, func, data);
+}
+
/*
* This function returns the first node (in sort order) of the tree.
*/
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 660a87a22511..123bcef13e51 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -104,15 +104,13 @@ static int bdi_debug_stats_show(struct seq_file *m, void *v)
"b_more_io: %8lu\n"
"bdi_list: %8u\n"
"state: %8lx\n"
- "wb_mask: %8lx\n"
- "wb_list: %8u\n"
- "wb_cnt: %8u\n",
+ "wb_list: %8u\n",
(unsigned long) K(bdi_stat(bdi, BDI_WRITEBACK)),
(unsigned long) K(bdi_stat(bdi, BDI_RECLAIMABLE)),
K(bdi_thresh), K(dirty_thresh),
K(background_thresh), nr_wb, nr_dirty, nr_io, nr_more_io,
- !list_empty(&bdi->bdi_list), bdi->state, bdi->wb_mask,
- !list_empty(&bdi->wb_list), bdi->wb_cnt);
+ !list_empty(&bdi->bdi_list), bdi->state,
+ !list_empty(&bdi->wb_list));
#undef K
return 0;
@@ -340,14 +338,13 @@ int bdi_has_dirty_io(struct backing_dev_info *bdi)
static void bdi_flush_io(struct backing_dev_info *bdi)
{
struct writeback_control wbc = {
- .bdi = bdi,
.sync_mode = WB_SYNC_NONE,
.older_than_this = NULL,
.range_cyclic = 1,
.nr_to_write = 1024,
};
- writeback_inodes_wbc(&wbc);
+ writeback_inodes_wb(&bdi->wb, &wbc);
}
/*
@@ -675,12 +672,6 @@ int bdi_init(struct backing_dev_info *bdi)
bdi_wb_init(&bdi->wb, bdi);
- /*
- * Just one thread support for now, hard code mask and count
- */
- bdi->wb_mask = 1;
- bdi->wb_cnt = 1;
-
for (i = 0; i < NR_BDI_STAT_ITEMS; i++) {
err = percpu_counter_init(&bdi->bdi_stat[i], 0);
if (err)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 54f28bd493d3..37498ef61548 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -495,7 +495,6 @@ static void balance_dirty_pages(struct address_space *mapping,
for (;;) {
struct writeback_control wbc = {
- .bdi = bdi,
.sync_mode = WB_SYNC_NONE,
.older_than_this = NULL,
.nr_to_write = write_chunk,
@@ -537,7 +536,7 @@ static void balance_dirty_pages(struct address_space *mapping,
* up.
*/
if (bdi_nr_reclaimable > bdi_thresh) {
- writeback_inodes_wbc(&wbc);
+ writeback_inodes_wb(&bdi->wb, &wbc);
pages_written += write_chunk - wbc.nr_to_write;
get_dirty_limits(&background_thresh, &dirty_thresh,
&bdi_thresh, bdi);
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 9d21d98ae5fa..27ae946363f1 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -99,6 +99,15 @@ static struct net_bridge_mdb_entry *__br_mdb_ip_get(
return NULL;
}
+static struct net_bridge_mdb_entry *br_mdb_ip_get(
+ struct net_bridge_mdb_htable *mdb, struct br_ip *dst)
+{
+ if (!mdb)
+ return NULL;
+
+ return __br_mdb_ip_get(mdb, dst, br_ip_hash(mdb, dst));
+}
+
static struct net_bridge_mdb_entry *br_mdb_ip4_get(
struct net_bridge_mdb_htable *mdb, __be32 dst)
{
@@ -107,7 +116,7 @@ static struct net_bridge_mdb_entry *br_mdb_ip4_get(
br_dst.u.ip4 = dst;
br_dst.proto = htons(ETH_P_IP);
- return __br_mdb_ip_get(mdb, &br_dst, __br_ip4_hash(mdb, dst));
+ return br_mdb_ip_get(mdb, &br_dst);
}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
@@ -119,23 +128,17 @@ static struct net_bridge_mdb_entry *br_mdb_ip6_get(
ipv6_addr_copy(&br_dst.u.ip6, dst);
br_dst.proto = htons(ETH_P_IPV6);
- return __br_mdb_ip_get(mdb, &br_dst, __br_ip6_hash(mdb, dst));
+ return br_mdb_ip_get(mdb, &br_dst);
}
#endif
-static struct net_bridge_mdb_entry *br_mdb_ip_get(
- struct net_bridge_mdb_htable *mdb, struct br_ip *dst)
-{
- return __br_mdb_ip_get(mdb, dst, br_ip_hash(mdb, dst));
-}
-
struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
struct sk_buff *skb)
{
struct net_bridge_mdb_htable *mdb = br->mdb;
struct br_ip ip;
- if (!mdb || br->multicast_disabled)
+ if (br->multicast_disabled)
return NULL;
if (BR_INPUT_SKB_CB(skb)->igmp)
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 44420992f72f..8fb75f89c4aa 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -591,6 +591,9 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff *skb,
pskb_trim_rcsum(skb, len);
+ /* BUG: Should really parse the IP options here. */
+ memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
+
nf_bridge_put(skb->nf_bridge);
if (!nf_bridge_alloc(skb))
return NF_DROP;
diff --git a/net/core/dev.c b/net/core/dev.c
index 2b3bf53bc687..723a34710ad4 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1553,6 +1553,24 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
rcu_read_unlock();
}
+/*
+ * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
+ * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
+ */
+void netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
+{
+ unsigned int real_num = dev->real_num_tx_queues;
+
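+	/* requests beyond num_tx_queues are ignored; shrinking also resets
+	 * the qdiscs of the queues that are no longer in use */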
+ if (unlikely(txq > dev->num_tx_queues))
+ ;
+ else if (txq > real_num)
+ dev->real_num_tx_queues = txq;
+ else if (txq < real_num) {
+ dev->real_num_tx_queues = txq;
+ qdisc_reset_all_tx_gt(dev, txq);
+ }
+}
+EXPORT_SYMBOL(netif_set_real_num_tx_queues);
static inline void __netif_reschedule(struct Qdisc *q)
{
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index a0f4964033d2..75e4ffeb8cc9 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -318,23 +318,33 @@ out:
}
static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev,
- void __user *useraddr)
+ u32 cmd, void __user *useraddr)
{
- struct ethtool_rxnfc cmd;
+ struct ethtool_rxnfc info;
+ size_t info_size = sizeof(info);
if (!dev->ethtool_ops->set_rxnfc)
return -EOPNOTSUPP;
- if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
+ /* struct ethtool_rxnfc was originally defined for
+ * ETHTOOL_{G,S}RXFH with only the cmd, flow_type and data
+ * members. User-space might still be using that
+ * definition. */
+ if (cmd == ETHTOOL_SRXFH)
+ info_size = (offsetof(struct ethtool_rxnfc, data) +
+ sizeof(info.data));
+
+ if (copy_from_user(&info, useraddr, info_size))
return -EFAULT;
- return dev->ethtool_ops->set_rxnfc(dev, &cmd);
+ return dev->ethtool_ops->set_rxnfc(dev, &info);
}
static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev,
- void __user *useraddr)
+ u32 cmd, void __user *useraddr)
{
struct ethtool_rxnfc info;
+ size_t info_size = sizeof(info);
const struct ethtool_ops *ops = dev->ethtool_ops;
int ret;
void *rule_buf = NULL;
@@ -342,13 +352,22 @@ static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev,
if (!ops->get_rxnfc)
return -EOPNOTSUPP;
- if (copy_from_user(&info, useraddr, sizeof(info)))
+ /* struct ethtool_rxnfc was originally defined for
+ * ETHTOOL_{G,S}RXFH with only the cmd, flow_type and data
+ * members. User-space might still be using that
+ * definition. */
+ if (cmd == ETHTOOL_GRXFH)
+ info_size = (offsetof(struct ethtool_rxnfc, data) +
+ sizeof(info.data));
+
+ if (copy_from_user(&info, useraddr, info_size))
return -EFAULT;
if (info.cmd == ETHTOOL_GRXCLSRLALL) {
if (info.rule_cnt > 0) {
- rule_buf = kmalloc(info.rule_cnt * sizeof(u32),
- GFP_USER);
+ if (info.rule_cnt <= KMALLOC_MAX_SIZE / sizeof(u32))
+ rule_buf = kmalloc(info.rule_cnt * sizeof(u32),
+ GFP_USER);
if (!rule_buf)
return -ENOMEM;
}
@@ -359,7 +378,7 @@ static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev,
goto err_out;
ret = -EFAULT;
- if (copy_to_user(useraddr, &info, sizeof(info)))
+ if (copy_to_user(useraddr, &info, info_size))
goto err_out;
if (rule_buf) {
@@ -1516,12 +1535,12 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
case ETHTOOL_GRXCLSRLCNT:
case ETHTOOL_GRXCLSRULE:
case ETHTOOL_GRXCLSRLALL:
- rc = ethtool_get_rxnfc(dev, useraddr);
+ rc = ethtool_get_rxnfc(dev, ethcmd, useraddr);
break;
case ETHTOOL_SRXFH:
case ETHTOOL_SRXCLSRLDEL:
case ETHTOOL_SRXCLSRLINS:
- rc = ethtool_set_rxnfc(dev, useraddr);
+ rc = ethtool_set_rxnfc(dev, ethcmd, useraddr);
break;
case ETHTOOL_GGRO:
rc = ethtool_get_gro(dev, useraddr);
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 1705476670ef..23883a48ebfb 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -108,6 +108,8 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
u8 *xprth = skb_network_header(skb) + iph->ihl * 4;
memset(fl, 0, sizeof(struct flowi));
+ fl->mark = skb->mark;
+
if (!(iph->frag_off & htons(IP_MF | IP_OFFSET))) {
switch (iph->protocol) {
case IPPROTO_UDP:
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c
index 47d227713758..2933396e0281 100644
--- a/net/ipv6/netfilter/ip6t_REJECT.c
+++ b/net/ipv6/netfilter/ip6t_REJECT.c
@@ -97,9 +97,11 @@ static void send_reset(struct net *net, struct sk_buff *oldskb)
fl.fl_ip_dport = otcph.source;
security_skb_classify_flow(oldskb, &fl);
dst = ip6_route_output(net, NULL, &fl);
- if (dst == NULL)
+ if (dst == NULL || dst->error) {
+ dst_release(dst);
return;
- if (dst->error || xfrm_lookup(net, &dst, &fl, NULL, 0))
+ }
+ if (xfrm_lookup(net, &dst, &fl, NULL, 0))
return;
hh_len = (dst->dev->hard_header_len + 15)&~15;
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 4a0e77e14468..6baeabbbca82 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -124,6 +124,8 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
u8 nexthdr = nh[IP6CB(skb)->nhoff];
memset(fl, 0, sizeof(struct flowi));
+ fl->mark = skb->mark;
+
ipv6_addr_copy(&fl->fl6_dst, reverse ? &hdr->saddr : &hdr->daddr);
ipv6_addr_copy(&fl->fl6_src, reverse ? &hdr->daddr : &hdr->saddr);
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index f8779006986d..f6127b9f5aca 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -503,6 +503,11 @@ static int ignore_undef_symbol(struct elf_info *info, const char *symname)
strncmp(symname, "_rest32gpr_", sizeof("_rest32gpr_") - 1) == 0 ||
strncmp(symname, "_save32gpr_", sizeof("_save32gpr_") - 1) == 0)
return 1;
+ if (info->hdr->e_machine == EM_PPC64)
+ /* Special register function linked on all modules during final link of .ko */
+ if (strncmp(symname, "_restgpr0_", sizeof("_restgpr0_") - 1) == 0 ||
+ strncmp(symname, "_savegpr0_", sizeof("_savegpr0_") - 1) == 0)
+ return 1;
/* Do not ignore this symbol */
return 0;
}